#!/usr/bin/env bash
#
# vim:ts=5:sw=5:expandtab
# we have a space softtab, which ensures readability with other editors too

[ -z "$BASH_VERSINFO" ] && printf "\n\033[1;35m Please make sure you're using \"bash\"! Bye...\033[m\n\n" >&2 && exit 245
[ $(kill -l | grep -c SIG) -eq 0 ] && printf "\n\033[1;35m Please make sure you're calling me without leading \"sh\"! Bye...\033[m\n\n" >&2 && exit 245
# testssl.sh is a program for spotting weak SSL/TLS encryption, ciphers, protocol versions and some
# vulnerabilities or features
#
# Devel version is available from https://github.com/drwetter/testssl.sh
# Stable version from https://testssl.sh
# Please file bugs at github! https://github.com/drwetter/testssl.sh/issues

# Main author: Dirk Wetter, copyleft: 2007-today, contributions so far see CREDITS.md
#
# License: GPLv2, see http://www.fsf.org/licensing/licenses/info/GPLv2.html
# and accompanying license "LICENSE.txt". Redistribution + modification under this
# license permitted.
# If you enclose this script or parts of it in your software, it has to
# be accompanied by the same license (see link) and the place where to get
# the recent version of this program. Do not violate the license and if
# you do not agree to all of these terms, do not use it in the first place.
#
# OpenSSL, which is being used and may be distributed via one of this project's
# web sites, is subject to its licensing: https://www.openssl.org/source/license.txt
#
# The client simulation data comes from SSLlabs and is licensed under the 'Qualys SSL Labs
# Terms of Use' (v2.2), see https://www.ssllabs.com/downloads/Qualys_SSL_Labs_Terms_of_Use.pdf,
# stating a CC BY 3.0 US license: https://creativecommons.org/licenses/by/3.0/us/
#
# Please note: USAGE WITHOUT ANY WARRANTY, THE SOFTWARE IS PROVIDED "AS IS".
#
# USE IT AT your OWN RISK!
# Seriously! The threat is that you run this code on your computer and that input could be /
# is being supplied via untrusted sources.

# HISTORY:
# Back in 2006 it all started with a few openssl commands...
# That's because openssl is such a good Swiss army knife (see e.g.
# wiki.openssl.org/index.php/Command_Line_Utilities) that it was difficult to resist
# wrapping some shell commands around it, which I used for my pen tests. This is how
# everything started.
# Now it has grown up; it has bash socket support for some features, which is basically replacing
# more and more functions of OpenSSL and will serve as some kind of library in the future.
# The socket checks in bash may sound cool and unique -- they are -- but probably you
# can achieve e.g. the same result with my favorite interactive shell: zsh (zmodload zsh/net/socket
# -- checkout zsh/net/tcp) too!
# /bin/bash though is way more often used within Linux and it's perfect
# for cross-platform support, see MacOS X and also under Windows the MSYS2 extension or Cygwin
# as well as Bash on Windows (WSL).
# Cross-platform is one of the three main goals of this script. Second: Ease of installation.
# No compiling, installing gems, going to CPAN, using pip etc. Third: Easy to use and to interpret
# the results.
# Did I mention it's open source?

# Q: So what's the difference to www.ssllabs.com/ssltest/ or sslcheck.globalsign.com/ ?
# A: As of now ssllabs only checks 1) webservers 2) on standard ports, 3) reachable from the
# internet. And those examples above 4) are 3rd parties. If these restrictions are all fine
# with you and you need a management compatible rating -- go ahead and use those.

# But even if you're fine with those restrictions: testssl.sh is meant as a tool in your hand
# and it's way more flexible.
#
# Oh, and did I mention testssl.sh is open source?

# Note that up to today there were a lot of changes for "standard" openssl
# binaries: a lot of features (ciphers, protocols, vulnerabilities)
# are disabled as they'll impact security otherwise. For security
# testing though we need all broken features. testssl.sh will
# over time replace those checks with bash sockets -- however it's
# still recommended to use the supplied binaries or cook your own, see
# https://github.com/drwetter/testssl.sh/blob/master/bin/Readme.md .
# Don't worry if feature X is not available: you'll get a warning about
# the missing feature! The idea is that if this script can't tell something
# for sure it speaks up so that you have a clear picture.

# debugging help:
readonly PS4='|${LINENO}> \011${FUNCNAME[0]:+${FUNCNAME[0]}(): }'

# see stackoverflow.com/questions/5014823/how-to-profile-a-bash-shell-script-slow-startup#20855353
# how to paste both in order to do performance analysis
DEBUGTIME=${DEBUGTIME:-false}
DEBUG_ALLINONE=${DEBUG_ALLINONE:-false}     # true: do debugging in one screen (old behaviour for testssl.sh and bash3's default)
                                            # false: needed for performance analysis or useful for just having an extra file
DEBUG_ALLINONE=${SETX:-false}               # SETX as a shortcut for old style debugging, overriding DEBUG_ALLINONE

if grep -q xtrace <<< "$SHELLOPTS"; then
     if "$DEBUGTIME"; then
          # separate debugging, doesn't mess up the screen, $DEBUGTIME determines whether we also do performance analysis
          exec 42>&2 2> >(tee /tmp/testssl-$$.log | sed -u 's/^.*$/now/' | date -f - +%s.%N >/tmp/testssl-$$.time)
          # BASH_XTRACEFD=42
     else
          if ! "$DEBUG_ALLINONE"; then
               exec 42>| /tmp/testssl-$$.log
               BASH_XTRACEFD=42
          fi
     fi
fi
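
# Debugging usage sketch (illustrative only; file names contain the actual PID): with xtrace enabled
# and DEBUGTIME=true the exec line above mirrors the trace to /tmp/testssl-<pid>.log and writes one
# timestamp per trace line to /tmp/testssl-<pid>.time, which can then be correlated line by line, e.g.
#     DEBUGTIME=true bash -x ./testssl.sh <URI>
#     paste /tmp/testssl-<pid>.time /tmp/testssl-<pid>.log | less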

# make sure that temporary files are cleaned up after use in ANY case
trap "cleanup" QUIT EXIT
trap "child_error" USR1
readonly VERSION="2.9dev"
readonly SWCONTACT="dirk aet testssl dot sh"
egrep -q "dev|rc" <<< "$VERSION" && \
     SWURL="https://testssl.sh/dev/" ||
     SWURL="https://testssl.sh/"

readonly PROG_NAME="$(basename "$0")"
readonly RUN_DIR="$(dirname "$0")"
TESTSSL_INSTALL_DIR="${TESTSSL_INSTALL_DIR:-""}"   # if you run testssl.sh from a different path you can set either TESTSSL_INSTALL_DIR
CA_BUNDLES_PATH="${CA_BUNDLES_PATH:-""}"           # or CA_BUNDLES_PATH to find the CA BUNDLES. TESTSSL_INSTALL_DIR helps you to find the RFC mapping also
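# e.g. (illustrative paths): if the script and its data files live under /opt/testssl you could call
#     TESTSSL_INSTALL_DIR=/opt/testssl ./testssl.sh <URI>
# or point CA_BUNDLES_PATH directly at the directory holding the CA bundle files.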
CIPHERS_BY_STRENGTH_FILE=""
TLS_DATA_FILE=""                        # mandatory file for socket-based handshakes
OPENSSL_LOCATION=""
HNAME="$(hostname)"
HNAME="${HNAME%%.*}"

declare CMDLINE
# When performing mass testing, the child processes need to be sent the
# command line in the form of an array (see #702 and http://mywiki.wooledge.org/BashFAQ/050).
readonly -a CMDLINE_ARRAY=("$@")
declare -a MASS_TESTING_CMDLINE

readonly CVS_REL=$(tail -5 "$0" | awk '/dirkw Exp/ { print $4" "$5" "$6}')
readonly CVS_REL_SHORT=$(tail -5 "$0" | awk '/dirkw Exp/ { print $4 }')
if git log &>/dev/null; then
     readonly GIT_REL=$(git log --format='%h %ci' -1 2>/dev/null | awk '{ print $1" "$2" "$3 }')
     readonly GIT_REL_SHORT=$(git log --format='%h %ci' -1 2>/dev/null | awk '{ print $1 }')
     readonly REL_DATE=$(git log --format='%h %ci' -1 2>/dev/null | awk '{ print $2 }')
else
     readonly REL_DATE=$(tail -5 "$0" | awk '/dirkw Exp/ { print $5 }')
fi
readonly SYSTEM=$(uname -s)
SYSTEM2=""                              # currently only being used for WSL = bash on windows
date -d @735275209 >/dev/null 2>&1 && \
     readonly HAS_GNUDATE=true || \
     readonly HAS_GNUDATE=false
# FreeBSD and OS X date(1) accept "-f inputformat"
date -j -f '%s' 1234567 >/dev/null 2>&1 && \
     readonly HAS_FREEBSDDATE=true || \
     readonly HAS_FREEBSDDATE=false

echo A | sed -E 's/A//' >/dev/null 2>&1 && \
     readonly HAS_SED_E=true || \
     readonly HAS_SED_E=false

tty -s && \
     readonly INTERACTIVE=true || \
     readonly INTERACTIVE=false

if [[ -z $TERM_WIDTH ]]; then                               # no batch file and no otherwise predefined TERM_WIDTH
     if ! tput cols &>/dev/null || ! "$INTERACTIVE"; then   # Prevent tput errors if running non interactive
          export TERM_WIDTH=${COLUMNS:-80}
     else
          export TERM_WIDTH=${COLUMNS:-$(tput cols)}        # for custom line wrapping and dashes
     fi
fi
TERM_CURRPOS=0                                              # custom line wrapping needs to alter the current horizontal cursor pos

## CONFIGURATION PART ##
# following variables make use of $ENV, e.g. OPENSSL=<myprivate_path_to_openssl> ./testssl.sh <URI>
# 0 means (normally) true here. Some of the variables are also accessible with a command line switch, see --help
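# A combined invocation example (values are illustrative, the variables below can be mixed freely):
#     COLOR=0 DEBUG=2 WIDE=true ./testssl.sh <URI>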
declare -x OPENSSL OPENSSL_TIMEOUT
FAST_SOCKET=${FAST_SOCKET:-false}       # EXPERIMENTAL feature to accelerate sockets -- DO NOT USE it for production
COLOR=${COLOR:-2}                       # 2: Full color, 1: b/w+positioning, 0: no ESC at all
COLORBLIND=${COLORBLIND:-false}         # if true, swap blue and green in the output
SHOW_EACH_C=${SHOW_EACH_C:-false}       # where individual ciphers are tested show just the positively tested ones
SHOW_SIGALGO=${SHOW_SIGALGO:-false}     # "secret" switch whether testssl.sh shows the signature algorithm for -E / -e
SNEAKY=${SNEAKY:-false}                 # is the referer and useragent we leave behind just usual?
QUIET=${QUIET:-false}                   # don't output the banner. By doing this you acknowledge the usage terms appearing in the banner
SSL_NATIVE=${SSL_NATIVE:-false}         # per default we do bash sockets where possible; "true": switch back to "openssl native"
ASSUME_HTTP=${ASSUME_HTTP:-false}       # in seldom cases (WAF, old servers, grumpy SSL) service detection fails. "True" enforces HTTP checks
BUGS=${BUGS:-""}                        # -bugs option from openssl, needed for some BIG IP F5
WARNINGS=${WARNINGS:-""}                # can be either off or batch
DEBUG=${DEBUG:-0}                       # 1: normal output, the files in /tmp/ are kept for further debugging purposes
                                        # 2: list more of what's going on, also lists some errors of connections
                                        # 3: slight hexdumps + other info,
                                        # 4: display bytes sent via sockets
                                        # 5: display bytes received via sockets
                                        # 6: whole 9 yards
FAST=${FAST:-false}                     # preference: show only first cipher, run_allciphers with openssl instead of sockets
WIDE=${WIDE:-false}                     # whether to display for some options just ciphers or a table w hexcode/KX,Enc,strength etc.
MASS_TESTING_MODE=${MASS_TESTING_MODE:-serial}     # can be serial or parallel. Subject to change
LOGFILE="${LOGFILE:-""}"                # logfile if used
JSONFILE="${JSONFILE:-""}"              # jsonfile if used
CSVFILE="${CSVFILE:-""}"                # csvfile if used
HTMLFILE="${HTMLFILE:-""}"              # HTML if used
FNAME=${FNAME:-""}                      # file name to read commands from
FNAME_PREFIX=${FNAME_PREFIX:-""}
APPEND=${APPEND:-false}                 # append to csv/json file instead of overwriting it
NODNS=${NODNS:-false}                   # always do DNS lookups per default. For some pentests it might save time to set this to true
HAS_IPv6=${HAS_IPv6:-false}             # if you have OpenSSL with IPv6 support AND IPv6 networking set it to yes
ALL_CLIENTS=${ALL_CLIENTS:-false}       # do you want to run all client simulations from all clients supplied by SSLlabs?

# tuning vars which cannot be set by a cmd line switch
EXPERIMENTAL=${EXPERIMENTAL:-false}
HEADER_MAXSLEEP=${HEADER_MAXSLEEP:-5}   # we wait this long before killing the process to retrieve a service banner / http header
MAX_WAITSOCK=${MAX_WAITSOCK:-10}        # waiting at max 10 seconds for socket reply. There shouldn't be any reason to change this.
CCS_MAX_WAITSOCK=${CCS_MAX_WAITSOCK:-5} # for the two CCS payload (each). There shouldn't be any reason to change this.
HEARTBLEED_MAX_WAITSOCK=${HEARTBLEED_MAX_WAITSOCK:-8}   # for the heartbleed payload. There shouldn't be any reason to change this.
STARTTLS_SLEEP=${STARTTLS_SLEEP:-1}     # max time to wait on a socket reply for STARTTLS
FAST_STARTTLS=${FAST_STARTTLS:-true}    # at the cost of reliability decrease the handshakes for STARTTLS
USLEEP_SND=${USLEEP_SND:-0.1}           # sleep time for general socket send
USLEEP_REC=${USLEEP_REC:-0.2}           # sleep time for general socket receive
HSTS_MIN=${HSTS_MIN:-179}               # >179 days is ok for HSTS
HSTS_MIN=$((HSTS_MIN * 86400))          # correct to seconds
HPKP_MIN=${HPKP_MIN:-30}                # >=30 days should be ok for HPKP_MIN, practical hints?
HPKP_MIN=$((HPKP_MIN * 86400))          # correct to seconds
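# Worked example: the HSTS default of 179 days becomes 179*86400 = 15465600 seconds; the HPKP default of 30 days 2592000 seconds.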
DAYS2WARN1=${DAYS2WARN1:-60}            # days to warn before cert expires, threshold 1
DAYS2WARN2=${DAYS2WARN2:-30}            # days to warn before cert expires, threshold 2
VULN_THRESHLD=${VULN_THRESHLD:-1}       # if vulnerabilities to check >$VULN_THRESHLD we DON'T show a separate header line in the output for each vuln. check
DNS_VIA_PROXY=${DNS_VIA_PROXY:-false}   # don't do DNS lookups via proxy. --ip=proxy reverses this
UNBRACKTD_IPV6=${UNBRACKTD_IPV6:-false} # some versions of OpenSSL (like Gentoo) don't support [bracketed] IPv6 addresses
NO_ENGINE=${NO_ENGINE:-false}           # if there are problems finding the (external) openssl engine set this to true
readonly CLIENT_MIN_PFS=5               # number of ciphers needed to run a test for PFS
CAPATH="${CAPATH:-/etc/ssl/certs/}"     # Does nothing yet (FC has only a CA bundle per default, ==> openssl version -d)
MEASURE_TIME_FILE=${MEASURE_TIME_FILE:-""}
if [[ -n "$MEASURE_TIME_FILE" ]] && [[ -z "$MEASURE_TIME" ]]; then
     MEASURE_TIME=true
else
     MEASURE_TIME=${MEASURE_TIME:-false}
fi
fi
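# e.g. (illustrative path): MEASURE_TIME_FILE=/tmp/testssl-timing.txt ./testssl.sh <URI>
# sets a timing output file and -- per the logic above -- implies MEASURE_TIME=true.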
DISPLAY_CIPHERNAMES="openssl"           # display OpenSSL ciphername (but both OpenSSL and RFC ciphernames in wide mode)
readonly UA_STD="TLS tester from $SWURL"
readonly UA_SNEAKY="Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0"

# initialization part, further global vars just declared here
IKNOW_FNAME=false
FIRST_FINDING=true                      # is this the first finding we are outputting to file?
JSONHEADER=true                         # include JSON headers and footers in HTML file, if one is being created
CSVHEADER=true                          # same for CSV
HTMLHEADER=true                         # same for HTML
GIVE_HINTS=false                        # give additional info to findings
SERVER_SIZE_LIMIT_BUG=false             # Some servers have either a ClientHello total size limit or a 128 cipher limit (e.g. old ASAs)
CHILD_MASS_TESTING=${CHILD_MASS_TESTING:-false}
HAD_SLEPT=0

readonly NPN_PROTOs="spdy/4a2,spdy/3,spdy/3.1,spdy/2,spdy/1,http/1.1"
# alpn_protos needs to be space-separated, not comma-separated, including odd ones observed @ facebook and others, old ones like h2-17 omitted as they could not be found
readonly ALPN_PROTOs="h2 spdy/3.1 http/1.1 h2-fb spdy/1 spdy/2 spdy/3 stun.turn stun.nat-discovery webrtc c-webrtc ftp"

declare -a SESS_RESUMPTION
TEMPDIR=""
TMPFILE=""
ERRFILE=""
CLIENT_AUTH=false
NO_SSL_SESSIONID=false
HOSTCERT=""
HEADERFILE=""
HEADERVALUE=""
HTTP_STATUS_CODE=""
PROTOS_OFFERED=""                       # this is a global to keep the info which protocol is being offered. See has_server_protocol()
TLS_EXTENSIONS=""
BAD_SERVER_HELLO_CIPHER=false           # reserved for cases where a ServerHello doesn't contain a cipher offered in the ClientHello
GOST_STATUS_PROBLEM=false
DETECTED_TLS_VERSION=""
PATTERN2SHOW=""
SOCK_REPLY_FILE=""
NW_STR=""
LEN_STR=""
SNI=""
POODLE=""                               # keep vulnerability status for TLS_FALLBACK_SCSV
OSSL_NAME=""                            # openssl name, in case of LibreSSL it's LibreSSL
OSSL_VER=""                             # openssl version, will be auto-determined
OSSL_VER_MAJOR=0
OSSL_VER_MINOR=0
OSSL_VER_APPENDIX="none"
CLIENT_PROB_NO=1
HAS_DH_BITS=${HAS_DH_BITS:-false}       # initialize openssl variables
HAS_SSL2=false
HAS_SSL3=false
HAS_TLS13=false
HAS_NO_SSL2=false
HAS_NOSERVERNAME=false
HAS_ALPN=false
HAS_SPDY=false
HAS_FALLBACK_SCSV=false
HAS_PROXY=false
HAS_XMPP=false
HAS_POSTGRES=false
HAS_MYSQL=false
PORT=443                                # unless otherwise auto-determined, see below
NODE=""
NODEIP=""
rDNS=""
CORRECT_SPACES=""                       # used for IPv6 and proper output formatting
IPADDRs=""
IP46ADDRs=""
LOCAL_A=false                           # does the $NODEIP come from /etc/hosts?
LOCAL_AAAA=false                        # does the IPv6 IP come from /etc/hosts?
XMPP_HOST=""
PROXY=""
PROXYIP=""
PROXYPORT=""
VULN_COUNT=0
SERVICE=""                              # is the server running an HTTP server, SMTP, POP or IMAP?
URI=""
CERT_FINGERPRINT_SHA2=""
RSA_CERT_FINGERPRINT_SHA2=""
STARTTLS_PROTOCOL=""
OPTIMAL_PROTO=""                        # we need this for IIS6 (sigh) and OpenSSL 1.0.2, otherwise some handshakes
                                        # will fail, see https://github.com/PeterMosmans/openssl/issues/19#issuecomment-100897892
STARTTLS_OPTIMAL_PROTO=""               # same for STARTTLS, see https://github.com/drwetter/testssl.sh/issues/188
TLS_TIME=""                             # to keep the value of TLS server timestamp
TLS_NOW=""                              # similar
TLS_DIFFTIME_SET=false                  # tells TLS functions to measure the TLS difftime or not
NOW_TIME=""
HTTP_TIME=""
GET_REQ11=""
START_TIME=0                            # time in epoch when the action started
END_TIME=0                              # .. ended
SCAN_TIME=0                             # diff of both: total scan time
LAST_TIME=0                             # only used for performance measurements (MEASURE_TIME=true)

# Devel stuff, see -q below
TLS_LOW_BYTE=""
HEX_CIPHER=""

SERVER_COUNTER=0                        # Counter for multiple servers

########### Global variables for parallel mass testing
readonly PARALLEL_SLEEP=1               # Time to sleep after starting each test
readonly MAX_WAIT_TEST=1200             # Maximum time (in seconds) to wait for a test to complete
readonly MAX_PARALLEL=20                # Maximum number of tests to run in parallel
                                        # This value may be made larger on systems with faster processors
declare -a -i PARALLEL_TESTING_PID=()        # process id for each child test (or 0 to indicate test has already completed)
declare -a PARALLEL_TESTING_CMDLINE=()       # command line for each child test
declare -i NR_PARALLEL_TESTS=0               # number of parallel tests run
declare -i NEXT_PARALLEL_TEST_TO_FINISH=0    # number of parallel tests that have completed and have been processed
declare FIRST_JSON_OUTPUT=true               # true if no output has been added to $JSONFILE yet.

#################### SEVERITY ####################

INFO=0
OK=0
LOW=1
MEDIUM=2
HIGH=3
CRITICAL=4
SEVERITY_LEVEL=0

set_severity_level() {
     local severity=$1

     if [[ "$severity" == "LOW" ]]; then
          SEVERITY_LEVEL=$LOW
     elif [[ "$severity" == "MEDIUM" ]]; then
          SEVERITY_LEVEL=$MEDIUM
     elif [[ "$severity" == "HIGH" ]]; then
          SEVERITY_LEVEL=$HIGH
     elif [[ "$severity" == "CRITICAL" ]]; then
          SEVERITY_LEVEL=$CRITICAL
     else
          echo "Supported severity levels are LOW, MEDIUM, HIGH, CRITICAL!"
          help 1
     fi
}
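
# Usage sketch (illustrative): set_severity_level "HIGH" lets show_finding() below pass
# only findings rated HIGH or CRITICAL (DEBUG and WARN always pass).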
show_finding() {
     local severity=$1

     ( [[ "$severity" == "DEBUG" ]] ) ||
     ( [[ "$severity" == "WARN" ]] ) ||
     ( [[ "$severity" == "INFO" ]] && [[ $SEVERITY_LEVEL -le $INFO ]] ) ||
     ( [[ "$severity" == "OK" ]] && [[ $SEVERITY_LEVEL -le $OK ]] ) ||
     ( [[ "$severity" == "LOW" ]] && [[ $SEVERITY_LEVEL -le $LOW ]] ) ||
     ( [[ "$severity" == "MEDIUM" ]] && [[ $SEVERITY_LEVEL -le $MEDIUM ]] ) ||
     ( [[ "$severity" == "HIGH" ]] && [[ $SEVERITY_LEVEL -le $HIGH ]] ) ||
     ( [[ "$severity" == "CRITICAL" ]] && [[ $SEVERITY_LEVEL -le $CRITICAL ]] )
}
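
# e.g. (illustrative) with SEVERITY_LEVEL set to $HIGH:
#     show_finding "MEDIUM"     # returns 1, i.e. the finding is filtered out
#     show_finding "CRITICAL"   # returns 0, i.e. the finding is kept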

###### Cipher suite information #####
declare -i TLS_NR_CIPHERS=0
declare TLS_CIPHER_HEXCODE=()
declare TLS_CIPHER_OSSL_NAME=()
declare TLS_CIPHER_RFC_NAME=()
declare TLS_CIPHER_SSLVERS=()
declare TLS_CIPHER_KX=()
declare TLS_CIPHER_AUTH=()
declare TLS_CIPHER_ENC=()
declare TLS_CIPHER_EXPORT=()
declare TLS_CIPHER_OSSL_SUPPORTED=()

###### output functions ######

# For HTML output, replace any HTML reserved characters with the entity name
html_reserved() {
     local output
     "$do_html" || return 0
     #sed  -e 's/\&/\&amp;/g' -e 's/</\&lt;/g' -e 's/>/\&gt;/g' -e 's/"/\&quot;/g' -e "s/'/\&apos;/g" <<< "$1"
     output="${1//\&/\&amp;}"
     output="${output//</\&lt;}"
     output="${output//>/\&gt;}"
     output="${output//\"/\&quot;}"
     output="${output//\'/\&apos;}"
     tm_out "$output"
     return 0
}
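# e.g. html_reserved 'a < "b" & c'   returns   a &lt; &quot;b&quot; &amp; c   (output only when $do_html is true)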

html_out() {
     "$do_html" || return 0
     [[ -n "$HTMLFILE" ]] && [[ ! -d "$HTMLFILE" ]] && printf -- "%b" "${1//%/%%}" >> "$HTMLFILE"
     # here and other printf's: a little bit of sanitizing with bash internal search&replace -- otherwise printf will hiccup at '%'. '--' and %b do the rest.
}

# this is intentionally the same.
safe_echo() { printf -- "%b" "${1//%/%%}"; }
tm_out()    { printf -- "%b" "${1//%/%%}"; }
tmln_out()  { printf -- "%b" "${1//%/%%}\n"; }

out()   { printf -- "%b" "${1//%/%%}"; html_out "$1"; }
outln() { printf -- "%b" "${1//%/%%}\n"; html_out "$1\n"; }

#TODO: Still not shell injection safe but if you just run it from the cmd line: that's fine
# color print functions, see also http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html
tm_liteblue()   { [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && tm_out "\033[0;32m$1" || tm_out "\033[0;34m$1" ) || tm_out "$1"; tm_off; }    # not yet used
pr_liteblue()   { tm_liteblue "$1"; [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && html_out "<span style=\"color:#00cd00;\">$(html_reserved "$1")</span>" || html_out "<span style=\"color:#0000ee;\">$(html_reserved "$1")</span>" ) || html_out "$(html_reserved "$1")"; }
tmln_liteblue() { tm_liteblue "$1"; tmln_out; }
prln_liteblue() { pr_liteblue "$1"; outln; }
tm_blue()       { [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && tm_out "\033[1;32m$1" || tm_out "\033[1;34m$1" ) || tm_out "$1"; tm_off; }    # used for head lines of single tests
pr_blue()       { tm_blue "$1"; [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && html_out "<span style=\"color:lime;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "<span style=\"color:#5c5cff;font-weight:bold;\">$(html_reserved "$1")</span>" ) || html_out "$(html_reserved "$1")"; }
tmln_blue()     { tm_blue "$1"; tmln_out; }
prln_blue()     { pr_blue "$1"; outln; }
# we should be able to use aliases here
tm_warning()    { [[ "$COLOR" -eq 2 ]] && tm_out "\033[0;35m$1" || tm_underline "$1"; tm_off; }      # some local problem: one test cannot be done
tmln_warning()  { tm_warning "$1"; tmln_out; }                                                       # litemagenta
pr_warning()    { tm_warning "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:#cd00cd;\">$(html_reserved "$1")</span>" || ( [[ "$COLOR" -eq 1 ]] && html_out "<u>$(html_reserved "$1")</u>" || html_out "$(html_reserved "$1")" ); }
prln_warning()  { pr_warning "$1"; outln; }
tm_magenta()    { [[ "$COLOR" -eq 2 ]] && tm_out "\033[1;35m$1" || tm_underline "$1"; tm_off; }      # fatal error: quitting because of this!
tmln_magenta()  { tm_magenta "$1"; tmln_out; }
# different as warning above?
pr_magenta()    { tm_magenta "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:magenta;font-weight:bold;\">$(html_reserved "$1")</span>" || ( [[ "$COLOR" -eq 1 ]] && html_out "<u>$(html_reserved "$1")</u>" || html_out "$(html_reserved "$1")" ); }
prln_magenta()  { pr_magenta "$1"; outln; }
tm_litecyan()   { [[ "$COLOR" -eq 2 ]] && tm_out "\033[0;36m$1" || tm_out "$1"; tm_off; }            # not yet used
tmln_litecyan() { tm_litecyan "$1"; tmln_out; }
pr_litecyan()   { tm_litecyan "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:#00cdcd;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
prln_litecyan() { pr_litecyan "$1"; outln; }
tm_cyan()       { [[ "$COLOR" -eq 2 ]] && tm_out "\033[1;36m$1" || tm_out "$1"; tm_off; }            # additional hint
tmln_cyan()     { tm_cyan "$1"; tmln_out; }
pr_cyan()       { tm_cyan "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:cyan;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
prln_cyan()     { pr_cyan "$1"; outln; }
tm_litegrey()   { [[ "$COLOR" -ne 0 ]] && tm_out "\033[0;37m$1" || tm_out "$1"; tm_off; }            # ... https://github.com/drwetter/testssl.sh/pull/600#issuecomment-276129876
tmln_litegrey() { tm_litegrey "$1"; tmln_out; }                                                      # not really usable on a black background, see ..
prln_litegrey() { pr_litegrey "$1"; outln; }
pr_litegrey()   { tm_litegrey "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<span style=\"color:darkgray;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
tm_grey()       { [[ "$COLOR" -ne 0 ]] && tm_out "\033[1;30m$1" || tm_out "$1"; tm_off; }
pr_grey()       { tm_grey "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<span style=\"color:#7f7f7f;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
tmln_grey()     { tm_grey "$1"; tmln_out; }
prln_grey()     { pr_grey "$1"; outln; }
tm_done_good()  { [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && tm_out "\033[0;34m$1" || tm_out "\033[0;32m$1" ) || tm_out "$1"; tm_off; }    # litegreen (liteblue), This is good
tmln_done_good(){ tm_done_good "$1"; tmln_out; }
pr_done_good()  { tm_done_good "$1"; [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && html_out "<span style=\"color:#0000ee;\">$(html_reserved "$1")</span>" || html_out "<span style=\"color:#00cd00;\">$(html_reserved "$1")</span>" ) || html_out "$(html_reserved "$1")"; }
prln_done_good(){ pr_done_good "$1"; outln; }
tm_done_best()  { [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && tm_out "\033[1;34m$1" || tm_out "\033[1;32m$1" ) || tm_out "$1"; tm_off; }    # green (blue), This is the best
tmln_done_best(){ tm_done_best "$1"; tmln_out; }
pr_done_best()  { tm_done_best "$1"; [[ "$COLOR" -eq 2 ]] && ( "$COLORBLIND" && html_out "<span style=\"color:#5c5cff;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "<span style=\"color:lime;font-weight:bold;\">$(html_reserved "$1")</span>" ) || html_out "$(html_reserved "$1")"; }
prln_done_best(){ pr_done_best "$1"; outln; }
tm_svrty_low()       { [[ "$COLOR" -eq 2 ]] && tm_out "\033[1;33m$1" || tm_out "$1"; tm_off; }       # yellow brown | academic or minor problem
tmln_svrty_low()     { tm_svrty_low "$1"; tmln_out; }
pr_svrty_low()       { tm_svrty_low "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:#cdcd00;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
prln_svrty_low()     { pr_svrty_low "$1"; outln; }
tm_svrty_medium()    { [[ "$COLOR" -eq 2 ]] && tm_out "\033[0;33m$1" || tm_out "$1"; tm_off; }       # brown | it is not a bad problem but you shouldn't do this
pr_svrty_medium()    { tm_svrty_medium "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:#cd8000;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
tmln_svrty_medium()  { tm_svrty_medium "$1"; tmln_out; }
prln_svrty_medium()  { pr_svrty_medium "$1"; outln; }
tm_svrty_high()      { [[ "$COLOR" -eq 2 ]] && tm_out "\033[0;31m$1" || tm_bold "$1"; tm_off; }      # litered
pr_svrty_high()      { tm_svrty_high "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:#cd0000;\">$(html_reserved "$1")</span>" || ( [[ "$COLOR" -eq 1 ]] && html_out "<span style=\"font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")" ); }
tmln_svrty_high()    { tm_svrty_high "$1"; tmln_out; }
prln_svrty_high()    { pr_svrty_high "$1"; outln; }
tm_svrty_critical()  { [[ "$COLOR" -eq 2 ]] && tm_out "\033[1;31m$1" || tm_bold "$1"; tm_off; }      # red
pr_svrty_critical()  { tm_svrty_critical "$1"; [[ "$COLOR" -eq 2 ]] && html_out "<span style=\"color:red;font-weight:bold;\">$(html_reserved "$1")</span>" || ( [[ "$COLOR" -eq 1 ]] && html_out "<span style=\"font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")" ); }
tmln_svrty_critical(){ tm_svrty_critical "$1"; tmln_out; }
prln_svrty_critical(){ pr_svrty_critical "$1"; outln; }
tm_deemphasize()     { tm_out "$1"; }                                                                # hook for a weakened screen output, see #600
pr_deemphasize()     { tm_deemphasize "$1"; html_out "<span style=\"color:darkgray;\">$(html_reserved "$1")</span>"; }
tmln_deemphasize()   { tm_deemphasize "$1"; tmln_out; }
prln_deemphasize()   { pr_deemphasize "$1"; outln; }
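
# Usage sketch (illustrative): the pr_* functions write to the terminal and, if HTML output is
# enabled, to $HTMLFILE; their tm_* counterparts write to the terminal only, e.g.
#     pr_svrty_high "VULNERABLE (NOT ok)"; outln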

# color=1 functions
tm_off()          { [[ "$COLOR" -ne 0 ]] && tm_out "\033[m"; }
tm_bold()         { [[ "$COLOR" -ne 0 ]] && tm_out "\033[1m$1" || tm_out "$1"; tm_off; }
tmln_bold()       { tm_bold "$1"; tmln_out; }
pr_bold()         { tm_bold "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<span style=\"font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
prln_bold()       { pr_bold "$1"; outln; }
tm_italic()       { [[ "$COLOR" -ne 0 ]] && tm_out "\033[3m$1" || tm_out "$1"; tm_off; }
tmln_italic()     { tm_italic "$1"; tmln_out; }
pr_italic()       { tm_italic "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<i>$(html_reserved "$1")</i>" || html_out "$(html_reserved "$1")"; }
prln_italic()     { pr_italic "$1"; outln; }
tm_strikethru()   { [[ "$COLOR" -ne 0 ]] && tm_out "\033[9m$1" || tm_out "$1"; tm_off; }             # ugly!
tmln_strikethru() { tm_strikethru "$1"; tmln_out; }
pr_strikethru()   { tm_strikethru "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<strike>$(html_reserved "$1")</strike>" || html_out "$(html_reserved "$1")"; }
prln_strikethru() { pr_strikethru "$1"; outln; }
tm_underline()    { [[ "$COLOR" -ne 0 ]] && tm_out "\033[4m$1" || tm_out "$1"; tm_off; }
tmln_underline()  { tm_underline "$1"; tmln_out; }
pr_underline()    { tm_underline "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<u>$(html_reserved "$1")</u>" || html_out "$(html_reserved "$1")"; }
prln_underline()  { pr_underline "$1"; outln; }
tm_reverse()      { [[ "$COLOR" -ne 0 ]] && tm_out "\033[7m$1" || tm_out "$1"; tm_off; }
tm_reverse_bold() { [[ "$COLOR" -ne 0 ]] && tm_out "\033[7m\033[1m$1" || tm_out "$1"; tm_off; }
pr_reverse()      { tm_reverse "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<span style=\"color:white;background-color:black;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
pr_reverse_bold() { tm_reverse_bold "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<span style=\"color:white;background-color:black;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }

#pr_headline() { pr_blue "$1"; }
#http://misc.flogisoft.com/bash/tip_colors_and_formatting
#pr_headline() { [[ "$COLOR" -eq 2 ]] && out "\033[1;30m\033[47m$1" || out "$1"; tm_off; }
tm_headline()   { [[ "$COLOR" -ne 0 ]] && tm_out "\033[1m\033[4m$1" || tm_out "$1"; tm_off; }
tmln_headline() { tm_headline "$1"; tmln_out; }
pr_headline()   { tm_headline "$1"; [[ "$COLOR" -ne 0 ]] && html_out "<span style=\"text-decoration:underline;font-weight:bold;\">$(html_reserved "$1")</span>" || html_out "$(html_reserved "$1")"; }
pr_headlineln() { pr_headline "$1"; outln; }

tm_squoted() { tm_out "'$1'"; }
pr_squoted() { out "'$1'"; }
tm_dquoted() { tm_out "\"$1\""; }
pr_dquoted() { out "\"$1\""; }

# either files couldn't be found or openssl isn't good enough (which shouldn't happen anymore)
tm_local_problem()   { tm_warning "Local problem: $1"; }
tmln_local_problem() { tmln_warning "Local problem: $1"; }
pr_local_problem()   { pr_warning "Local problem: $1"; }
prln_local_problem() { prln_warning "Local problem: $1"; }

# general failure
tm_fixme()   { tm_warning "Fixme: $1"; }
tmln_fixme() { tmln_warning "Fixme: $1"; }
pr_fixme()   { pr_warning "Fixme: $1"; }
prln_fixme() { prln_warning "Fixme: $1"; }

pr_url()     { tm_out "$1"; html_out "<a href=\"$1\" style=\"color:black;text-decoration:none;\">$1</a>"; }
pr_boldurl() { tm_bold "$1"; html_out "<a href=\"$1\" style=\"font-weight:bold;color:black;text-decoration:none;\">$1</a>"; }

### color switcher (see e.g. https://linuxtidbits.wordpress.com/2008/08/11/output-color-on-bash-scripts/
### http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x405.html
### no output support for HTML!
set_color_functions() {
     local ncurses_tput=true

     if [[ $(uname) == OpenBSD ]] && grep -q xterm-256 <<< "$TERM"; then
          export TERM=xterm
          # OpenBSD can't handle 256 colors (yet) in xterm which might lead to ugly errors
          # like "tput: not enough arguments (3) for capability `AF'". Not our fault but
          # before we get blamed we fix it here.
     fi

     # empty all vars if we have COLOR=0, which equals no escape code:
     red=""
     green=""
     brown=""
     blue=""
     magenta=""
     cyan=""
     grey=""
     yellow=""
     off=""
     bold=""
     underline=""
     italic=""

     type -p tput &>/dev/null || return 0     # Hey wait, do we actually have tput / ncurses ?
     tput cols &>/dev/null || return 0        # tput under BSDs and GNUs doesn't work either (TERM undefined?)
     tput sgr0 &>/dev/null || ncurses_tput=false
     tput sgr 0 1 &>/dev/null || ncurses_tput=false     # OpenBSD succeeds at the previous one but fails here
     if [[ "$COLOR" -eq 2 ]]; then
          if $ncurses_tput; then
               red=$(tput setaf 1)
               green=$(tput setaf 2)
               brown=$(tput setaf 3)
               blue=$(tput setaf 4)
               magenta=$(tput setaf 5)
               cyan=$(tput setaf 6)
               grey=$(tput setaf 7)
               yellow=$(tput setaf 3; tput bold)
          else      # this is a try for old BSD, see terminfo(5)
               red=$(tput AF 1)
               green=$(tput AF 2)
               brown=$(tput AF 3)
               blue=$(tput AF 4)
               magenta=$(tput AF 5)
               cyan=$(tput AF 6)
               grey=$(tput AF 7)
               yellow=$(tput AF 3; tput md)
          fi
     fi
     if [[ "$COLOR" -ge 1 ]]; then
          if $ncurses_tput; then
               bold=$(tput bold)
               underline=$(tput sgr 0 1 2>/dev/null)
               italic=$(tput sitm)
               italic_end=$(tput ritm)
               off=$(tput sgr0)
          else      # this is a try for old BSD, see terminfo(5)
               bold=$(tput md)
               underline=$(tput us)
               italic=$(tput ZH)          # that doesn't work on FreeBSD 9+10.x
               italic_end=$(tput ZR)      # here too. Probably entry missing in /etc/termcap
               reverse=$(tput mr)
               off=$(tput me)
          fi
          # italic doesn't work under Linux, FreeBSD (9). But both work under OpenBSD.
          # alternatively we could use escape codes
     fi
}

strip_quote() {
     # remove color codes (see http://www.commandlinefu.com/commands/view/3584/remove-color-codes-special-characters-with-sed)
     # \', leading and all trailing spaces
     sed -e "s,$(echo -e "\033")\[[0-9;]*[a-zA-Z],,g" \
         -e "s/\"/\\'/g" \
         -e 's/^ *//g' \
         -e 's/ *$//g' <<< "$1"
}
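# e.g. (illustrative): strip_quote "$(tput setaf 1) \"Some Cipher\" "   yields   'Some Cipher'
# (the color escape, the leading/trailing spaces and the double quotes are stripped or replaced)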

#################### JSON FILE FORMATTING ####################

fileout_pretty_json_footer() {
     echo -e "          ],
          \"scanTime\"  : \"$SCAN_TIME\"\n}"
}

fileout_json_footer() {
     "$do_json" && printf "]\n" >> "$JSONFILE"
     "$do_pretty_json" && (printf "$(fileout_pretty_json_footer)") >> "$JSONFILE"
}

fileout_json_section() {
     case $1 in
           1) echo -e "          \"singleCipher\"      : [" ;;
           2) echo -e "          \"protocols\"         : [" ;;
           3) echo -e ",\n          \"ciphers\"           : [" ;;
           4) echo -e ",\n          \"pfs\"               : [" ;;
           5) echo -e ",\n          \"serverPreferences\" : [" ;;
           6) echo -e ",\n          \"serverDefaults\"    : [" ;;
           7) echo -e ",\n          \"headerResponse\"    : [" ;;
           8) echo -e ",\n          \"vulnerabilities\"   : [" ;;
           9) echo -e ",\n          \"cipherTests\"       : [" ;;
          10) echo -e ",\n          \"browserSimulations\": [" ;;
          11) echo -e ",\n          \"grease\"            : [" ;;
           *) echo "invalid section" ;;
     esac
}

fileout_section_header() {
     local str=""
     "$2" && str="$(fileout_section_footer false)"
     "$do_pretty_json" && FIRST_FINDING=true && (printf "%s%s\n" "$str" "$(fileout_json_section "$1")") >> "$JSONFILE"
}

fileout_section_footer() {
     "$do_pretty_json" && printf "\n          ]" >> "$JSONFILE"
     "$do_pretty_json" && "$1" && echo -e "\n}" >> "$JSONFILE"
}

fileout_json_print_parameter() {
     local parameter="$1"
     local filler="$2"
     local value="$3"
     local not_last="$4"
     local spaces=""

     "$do_json" && \
          spaces="          " || \
          spaces="                    "
     if [[ ! -z "$value" ]]; then
          printf "%s%s%s%s" "$spaces" "\"$parameter\"" "$filler" ": \"$value\"" >> "$JSONFILE"
          "$not_last" && printf ",\n" >> "$JSONFILE"
     fi
}
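# Call sketch (filler width is illustrative): fileout_json_print_parameter "port" "     " "$PORT" true
# appends a fragment like   "port"     : "443",   to $JSONFILE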

fileout_json_finding() {
     local target

     if "$do_json"; then
          "$FIRST_FINDING" || echo -n "," >> "$JSONFILE"
          echo -e "          {" >> "$JSONFILE"
          fileout_json_print_parameter "id" "           " "$1" true
          fileout_json_print_parameter "ip" "           " "$NODE/$NODEIP" true
          fileout_json_print_parameter "port" "         " "$PORT" true
          fileout_json_print_parameter "severity" "     " "$2" true
          fileout_json_print_parameter "cve" "          " "$cve" true
          fileout_json_print_parameter "cwe" "          " "$cwe" true
          "$GIVE_HINTS" && fileout_json_print_parameter "hint" "         " "$hint" true
          fileout_json_print_parameter "finding" "      " "$finding" false
          echo -e "\n          }" >> "$JSONFILE"
     fi
     if "$do_pretty_json"; then
          if [[ "$1" == "service" ]]; then
               if [[ $SERVER_COUNTER -gt 1 ]]; then
                    echo "          ," >> "$JSONFILE"
               fi
               if "$CHILD_MASS_TESTING" && ! "$JSONHEADER"; then
                    target="$NODE"
                    $do_mx_all_ips && target="$URI"
                    echo -e "          {
                    \"target host\"       : \"$target\",
                    \"port\"              : \"$PORT\",
                    \"service\"           : \"$finding\",
                    \"ip\"                : \"$NODEIP\"," >> "$JSONFILE"
               else
                    echo -e "          {
                    \"service\"           : \"$finding\",
                    \"ip\"                : \"$NODEIP\"," >> "$JSONFILE"
               fi
               $do_mx_all_ips && echo -e "                    \"hostname\"          : \"$NODE\"," >> "$JSONFILE"
          else
               ("$FIRST_FINDING" && echo -n "            {" >> "$JSONFILE") || echo -n ",{" >> "$JSONFILE"
               echo -e -n "\n" >> "$JSONFILE"
               fileout_json_print_parameter "id" "           " "$1" true
               fileout_json_print_parameter "severity" "     " "$2" true
               fileout_json_print_parameter "cve" "          " "$cve" true
               fileout_json_print_parameter "cwe" "          " "$cwe" true
               "$GIVE_HINTS" && fileout_json_print_parameter "hint" "         " "$hint" true
               fileout_json_print_parameter "finding" "      " "$finding" false
               echo -e -n "\n          }" >> "$JSONFILE"
          fi
     fi
}
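
# Illustrative sketch (made-up values): in pretty-JSON mode a regular (non-"service") finding ends up
# as one object inside the currently open section array, e.g.
#            {
#               "id"           : "BEAST",
#               "severity"     : "LOW",
#               "finding"      : "VULNERABLE -- but also supports higher protocols"
#            }
# "cve", "cwe" and "hint" only appear when non-empty (hints additionally require $GIVE_HINTS).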

##################### FILE FORMATTING #########################

fileout_pretty_json_banner() {
     local target

     if "$do_mass_testing"; then
          echo -e "          \"Invocation\"  : \"$PROG_NAME $CMDLINE\",
          \"at\"           : \"$HNAME:$OPENSSL_LOCATION\",
          \"version\"      : \"$VERSION ${GIT_REL_SHORT:-$CVS_REL_SHORT} from $REL_DATE\",
          \"openssl\"      : \"$OSSL_NAME $OSSL_VER from $OSSL_BUILD_DATE\",
          \"startTime\"    : \"$START_TIME\",
          \"scanResult\"   : ["
     else
          [[ -z "$NODE" ]] && parse_hn_port "${URI}"
          # NODE, URL_PATH, PORT, IPADDR and IP46ADDR is set now --> wrong place
          target="$NODE"
          $do_mx_all_ips && target="$URI"
          echo -e "          \"Invocation\"  : \"$PROG_NAME $CMDLINE\",
          \"at\"           : \"$HNAME:$OPENSSL_LOCATION\",
          \"version\"      : \"$VERSION ${GIT_REL_SHORT:-$CVS_REL_SHORT} from $REL_DATE\",
          \"openssl\"      : \"$OSSL_NAME $OSSL_VER from $OSSL_BUILD_DATE\",
          \"target host\"  : \"$target\",
          \"port\"         : \"$PORT\",
          \"startTime\"    : \"$START_TIME\",
          \"scanResult\"   : ["
     fi
}
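
# Schematic sketch (added, not the output of any single run): together with the section helpers and
# footers above, the banner yields a pretty-JSON document shaped roughly like
#      {
#           "Invocation"  : "testssl.sh ...",
#           ...
#           "scanResult"  : [
#                { "service" : "...", "ip" : "...", "protocols" : [ ... ], ... }
#           ],
#           "scanTime"    : "..."
#      }
# fileout_json_footer() / fileout_pretty_json_footer() close the scanResult array and the outer object.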

fileout_banner() {
     #if ! "$APPEND"; then
     #     if "$CSVHEADER"; then
     #          :
     #     fi
     if "$JSONHEADER"; then
          # "$do_json" &&               # here we maybe should add a banner, too
          "$do_pretty_json" && (printf "%s\n" "$(fileout_pretty_json_banner)") >> "$JSONFILE"
     fi
     #fi
}

fileout_separator() {
     if "$JSONHEADER"; then
          "$do_pretty_json" && echo "          ," >> "$JSONFILE"
          "$do_json" && echo -n "," >> "$JSONFILE"
     fi
}
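
# Note (added interpretation, not stated in the code itself): fileout_separator() is presumably called
# between two scans that share one output file (e.g. mass testing writing into a single JSON file), so
# that the top-level entries of consecutive targets stay comma-separated.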

fileout_footer() {
     if "$JSONHEADER"; then
          fileout_json_footer
     fi
     # CSV: no footer
     return 0
}

fileout_insert_warning() {
     # See #815. Make sure we don't mess up the JSON PRETTY format if we complain with a client side warning.
     # This should only be called if an *extra* warning will be printed (previously: 'fileout <extra_warning_ID> "WARN" ')
     # arg1: json identifier, arg2: normally "WARN", arg3: finding
     if "$do_pretty_json"; then
          echo -e "          \"clientProblem${CLIENT_PROB_NO}\" : [" >>"$JSONFILE"
          CLIENT_PROB_NO=$((CLIENT_PROB_NO + 1))
          FIRST_FINDING=true        # make sure we don't have a comma here
     fi
     fileout "$1" "$2" "$3"
     if "$do_pretty_json"; then
          echo -e "\n          ]," >>"$JSONFILE"
     fi
}
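
# Illustrative sketch (hypothetical identifier and values): a client-side warning issued this way shows
# up in pretty JSON as its own small array outside the regular sections, e.g.
#      "clientProblem1" : [
#            {
#               "id"           : "some_id",
#               "severity"     : "WARN",
#               "finding"      : "..."
#            }
#      ],
# Setting FIRST_FINDING=true beforehand keeps a stray leading comma out of that array.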

# ID, SEVERITY, FINDING, CVE, CWE, HINT
fileout() {
     local severity="$2"
     local cve="$4"
     local cwe="$5"
     local hint="$6"

     if ( "$do_pretty_json" && [[ "$1" == "service" ]] ) || show_finding "$severity"; then
          local finding=$(strip_lf "$(newline_to_spaces "$(strip_quote "$3")")")
          [[ -e "$JSONFILE" ]] && (fileout_json_finding "$1" "$severity" "$finding" "$cve" "$cwe" "$hint")
          "$do_csv" && \
               echo -e "\"$1\",\"$NODE/$NODEIP\",\"$PORT\",\"$severity\",\"$finding\",\"$cve\",\"$cwe\",\"$hint\"" >> "$CSVFILE"
          "$FIRST_FINDING" && FIRST_FINDING=false
     fi
}
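
# Illustrative example (hypothetical values): a typical caller does something like
#      fileout "SSLv3" "HIGH" "offered (NOT ok)" "CVE-2014-3566" "CWE-757"
# With --csv enabled (and the severity passing show_finding), that appends a row of the form
#      "SSLv3","example.com/203.0.113.10","443","HIGH","offered (NOT ok)","CVE-2014-3566","CWE-757",""
# to $CSVFILE and hands the same fields to fileout_json_finding() whenever a JSON file is in use.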

json_header() {
     local fname_prefix
     local filename_provided=false

     [[ -n "$JSONFILE" ]] && [[ ! -d "$JSONFILE" ]] && filename_provided=true

     # Similar to HTML: Don't create headers and footers in the following scenarios:
     #  * no JSON/CSV output is being created.
     #  * mass testing is being performed and each test will have its own file.
     #  * this is an individual test within a mass test and all output is being placed in a single file.
     ! "$do_json" && ! "$do_pretty_json" && JSONHEADER=false && return 0
     "$do_mass_testing" && ! "$filename_provided" && JSONHEADER=false && return 0
     "$CHILD_MASS_TESTING" && "$filename_provided" && JSONHEADER=false && return 0

     if "$do_display_only"; then
          fname_prefix="local-ciphers"
     elif "$do_mass_testing"; then
          :
     elif "$do_mx_all_ips"; then
          fname_prefix="${FNAME_PREFIX}.mx-${URI}"
     else
          ! "$filename_provided" && [[ -z "$NODE" ]] && parse_hn_port "${URI}"
          # NODE, URL_PATH, PORT, IPADDR and IP46ADDR is set now --> wrong place
          fname_prefix="${FNAME_PREFIX}.${NODE}"_p"${PORT}"
     fi
     if [[ -z "$JSONFILE" ]]; then
          JSONFILE="$fname_prefix-$(date +"%Y%m%d-%H%M".json)"
     elif [[ -d "$JSONFILE" ]]; then
          JSONFILE="$JSONFILE/${fname_prefix}-$(date +"%Y%m%d-%H%M".json)"
     fi
     if "$APPEND"; then
          JSONHEADER=false
     else
          [[ -e "$JSONFILE" ]] && fatal "\"$JSONFILE\" exists. Either use \"--append\" or (re)move it" 1
          "$do_json" && echo "[" > "$JSONFILE"
          "$do_pretty_json" && echo "{" > "$JSONFILE"
     fi
     #FIRST_FINDING=false
     return 0
}
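
# Example (the concrete name is hypothetical): for a plain single-target run without --jsonfile,
# json_header() derives the file name from the target, e.g. something like
#      ${FNAME_PREFIX}.example.com_p443-20170329-1716.json
# If --jsonfile points to an existing directory, the same name is created inside that directory instead.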

csv_header() {
     local fname_prefix
     local filename_provided=false

     [[ -n "$CSVFILE" ]] && [[ ! -d "$CSVFILE" ]] && filename_provided=true

     # CSV similar:
     ! "$do_csv" && CSVHEADER=false && return 0
     "$do_mass_testing" && ! "$filename_provided" && CSVHEADER=false && return 0
     "$CHILD_MASS_TESTING" && "$filename_provided" && CSVHEADER=false && return 0

     if "$do_display_only"; then
          fname_prefix="local-ciphers"
     elif "$do_mass_testing"; then
          :
     elif "$do_mx_all_ips"; then
          fname_prefix="${FNAME_PREFIX}.mx-$URI"
     else
          ! "$filename_provided" && [[ -z "$NODE" ]] && parse_hn_port "${URI}"
          # NODE, URL_PATH, PORT, IPADDR and IP46ADDR is set now --> wrong place
          fname_prefix="${FNAME_PREFIX}.${NODE}"_p"${PORT}"
     fi
     if [[ -z "$CSVFILE" ]]; then
          CSVFILE="${fname_prefix}-$(date +"%Y%m%d-%H%M".csv)"
     elif [[ -d "$CSVFILE" ]]; then
          CSVFILE="$CSVFILE/${fname_prefix}-$(date +"%Y%m%d-%H%M".csv)"
     fi
2017-03-29 17:16:09 +02:00
if "$APPEND"; then
CSVHEADER=false
else
[[ -e "$CSVFILE" ]] && fatal "\"$CSVFILE\" exists. Either use \"--append\" or (re)move it" 1
echo "\"id\",\"fqdn/ip\",\"port\",\"severity\",\"finding\",\"cve\",\"cwe\",\"hint\"" > "$CSVFILE"
fi
2017-03-27 00:30:42 +02:00
return 0
2016-01-23 19:18:33 +01:00
}
2017-03-18 22:24:35 +01:00
2017-03-27 00:30:42 +02:00
2017-03-18 22:24:35 +01:00
################# JSON FILE FORMATTING END. HTML START ####################
2015-05-17 22:43:53 +02:00
2017-02-07 20:25:41 +01:00
html_header() {
2017-02-28 19:31:06 +01:00
local fname_prefix
2017-03-29 17:16:09 +02:00
local filename_provided=false
2017-03-31 12:24:25 +02:00
2017-03-29 17:16:09 +02:00
[[ -n "$HTMLFILE" ]] && [[ ! -d "$HTMLFILE" ]] && filename_provided=true
2017-02-28 19:31:06 +01:00
# Don't create HTML headers and footers in the following scenarios:
# * HTML output is not being created.
# * mass testing is being performed and each test will have its own HTML file.
# * this is an individual test within a mass test and all HTML output is being placed in a single file.
2017-03-29 17:16:09 +02:00
! "$do_html" && HTMLHEADER=false && return 0
"$do_mass_testing" && ! "$filename_provided" && HTMLHEADER=false && return 0
"$CHILD_MASS_TESTING" && "$filename_provided" && HTMLHEADER=false && return 0
2017-02-28 19:31:06 +01:00
if "$do_display_only"; then
fname_prefix="local-ciphers"
elif "$do_mass_testing"; then
:
elif "$do_mx_all_ips"; then
2017-11-14 19:41:25 +01:00
fname_prefix="${FNAME_PREFIX}.mx-$URI"
2017-02-28 19:31:06 +01:00
else
2017-03-29 17:16:09 +02:00
! "$filename_provided" && [[ -z "$NODE" ]] && parse_hn_port "${URI}"
2017-03-27 00:54:38 +02:00
# NODE, URL_PATH, PORT, IPADDR and IP46ADDR are set now --> wrong place
2017-11-14 19:41:25 +01:00
fname_prefix="${FNAME_PREFIX}.${NODE}"_p"${PORT}"
2017-02-28 19:31:06 +01:00
fi
2017-02-14 19:19:12 +01:00
2017-03-29 17:16:09 +02:00
if [[ -z "$HTMLFILE" ]]; then
2017-04-07 09:49:44 +02:00
HTMLFILE="$fname_prefix-$(date +"%Y%m%d-%H%M").html"
2017-03-29 17:16:09 +02:00
elif [[ -d "$HTMLFILE" ]]; then
2017-04-07 09:49:44 +02:00
HTMLFILE="$HTMLFILE/$fname_prefix-$(date +"%Y%m%d-%H%M").html"
2017-02-14 19:44:03 +01:00
fi
2017-03-29 17:16:09 +02:00
if "$APPEND"; then
HTMLHEADER=false
else
[[ -e "$HTMLFILE" ]] && fatal "\"$HTMLFILE\" exists. Either use \"--append\" or (re)move it" 1
html_out "<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n"
html_out "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n"
html_out "<!-- This file was created with testssl.sh. https://testssl.sh -->\n"
html_out "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n"
html_out "<head>\n"
html_out "<meta http-equiv=\"Content-Type\" content=\"application/xml+xhtml; charset=UTF-8\" />\n"
html_out "<title>testssl.sh</title>\n"
html_out "</head>\n"
html_out "<body>\n"
html_out "<pre>\n"
fi
Handle --file option
Introduced a "trick" so that if the `--file` option is used, `html_header()` will only be called once before anything is printed and `html_footer()` will only be called once after all printing is complete. With this, `html_header()` now deletes the output file if it exists.
Also introduced `html_reserved()`, which is called for all text to be sent to `html_out()`. `html_reserved()` converts any HTML reserved characters (", ', &, <, >) to their corresponding entity names (&quot;, &apos;, &amp;, &lt;, &gt;).
2017-02-09 23:03:21 +01:00
return 0
2017-02-07 20:25:41 +01:00
}
2017-02-14 19:19:12 +01:00
html_banner() {
2017-03-29 17:16:09 +02:00
if "$CHILD_MASS_TESTING" && "$HTMLHEADER"; then
2017-02-28 19:31:06 +01:00
html_out "## Scan started as: \"$PROG_NAME $CMDLINE\"\n"
html_out "## at $HNAME:$OPENSSL_LOCATION\n"
html_out "## version testssl: $VERSION ${GIT_REL_SHORT:-$CVS_REL_SHORT} from $REL_DATE\n"
2017-09-22 18:48:38 +02:00
html_out "## version openssl: \"$OSSL_NAME $OSSL_VER\" from \"$OSSL_BUILD_DATE\")\n\n"
2017-02-28 19:31:06 +01:00
fi
2017-02-14 19:19:12 +01:00
}
2017-02-07 20:25:41 +01:00
html_footer() {
2017-02-28 19:31:06 +01:00
if "$HTMLHEADER"; then
html_out "</pre>\n"
html_out "</body>\n"
html_out "</html>\n"
fi
2017-02-09 23:03:21 +01:00
return 0
2017-02-07 20:25:41 +01:00
}
2017-03-18 22:24:35 +01:00
################# HTML FILE FORMATTING END ####################
################### FILE FORMATTING END #########################
2017-03-19 09:47:49 +01:00
###### START helper function definitions ######
2015-05-17 22:43:53 +02:00
2017-04-06 11:23:57 +02:00
if [[ "$BASH_VERSINFO" == 3 ]]; then
2017-04-06 11:33:54 +02:00
# older bash can do this only (MacOS X), even SLES 11, see #697
2017-03-31 12:24:25 +02:00
toupper() { tr 'a-z' 'A-Z' <<< "$1"; }
2017-04-05 17:28:06 +02:00
tolower() { tr 'A-Z' 'a-z' <<< "$1"; }
2017-04-06 11:23:57 +02:00
else
toupper() { echo -n "${1^^}"; }
tolower() { echo -n "${1,,}"; }
2017-02-03 13:03:22 +01:00
fi
2015-05-17 22:43:53 +02:00
debugme() {
2016-01-23 19:18:33 +01:00
[[ "$DEBUG" -ge 2 ]] && "$@"
2016-11-23 09:17:39 +01:00
return 0
2015-05-17 22:43:53 +02:00
}
2015-06-23 21:54:47 +02:00
hex2dec() {
2015-09-17 15:30:15 +02:00
#/usr/bin/printf -- "%d" 0x"$1"
echo $((16#$1))
2015-06-23 21:54:47 +02:00
}
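# Editor's illustration (not from the original source):
#   hex2dec ff    # -> 255
#   hex2dec 0400  # -> 1024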
2017-01-21 19:43:07 +01:00
# convert 414243 into ABC
hex2ascii() {
for (( i=0; i<${#1}; i+=2 )); do
printf "\x${1:$i:2}"
done
}
2017-04-18 23:15:32 +02:00
# convert decimal number < 256 to hex
dec02hex() {
printf "x%02x" "$1"
}
# convert decimal number between 256 and < 256*256 to hex
dec04hex() {
local a=$(printf "%04x" "$1")
printf "x%02s, x%02s" "${a:0:2}" "${a:2:2}"
}
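# Editor's illustration (not from the original source):
#   dec02hex 10    # -> "x0a"
#   dec04hex 300   # 300 = 0x012c -> "x01, x2c"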
2015-06-23 21:54:47 +02:00
# trim spaces for BSD and old sed
count_lines() {
2016-09-29 21:03:48 +02:00
#echo "${$(wc -l <<< "$1")// /}"
# ^^ bad substitution under bash, zsh ok. For some reason this does the trick:
echo $(wc -l <<< "$1")
2015-06-23 21:54:47 +02:00
}
2016-09-29 21:03:48 +02:00
2015-06-23 21:54:47 +02:00
count_words() {
2016-09-29 21:03:48 +02:00
#echo "${$(wc -w <<< "$1")// /}"
# ^^ bad substitution under bash, zsh ok. For some reason this does the trick:
echo $(wc -w <<< "$1")
2015-06-23 21:54:47 +02:00
}
2015-08-24 23:50:03 +02:00
count_ciphers() {
2017-03-31 12:24:25 +02:00
echo $(wc -w <<< "${1//:/ }")
2015-08-24 23:50:03 +02:00
}
actually_supported_ciphers() {
2015-09-17 15:30:15 +02:00
$OPENSSL ciphers "$1" 2>/dev/null || echo ""
2015-08-24 23:50:03 +02:00
}
2017-10-10 22:00:47 +02:00
# Given a protocol (arg1) and a list of ciphers (arg2) that is formatted as
# ", xx,xx, xx,xx, xx,xx, xx,xx" remove any TLSv1.3 ciphers if the protocol
# is less than 04 and remove any TLSv1.2-only ciphers if the protocol is less
# than 03.
strip_inconsistent_ciphers() {
local -i proto=0x$1
local cipherlist="$2"
[[ $proto -lt 4 ]] && cipherlist="${cipherlist//, 13,0[0-9a-fA-F]/}"
if [[ $proto -lt 3 ]]; then
cipherlist="${cipherlist//, 00,3[b-fB-F]/}"
cipherlist="${cipherlist//, 00,40/}"
cipherlist="${cipherlist//, 00,6[7-9a-dA-D]/}"
cipherlist="${cipherlist//, 00,9[c-fC-F]/}"
cipherlist="${cipherlist//, 00,[abAB][0-9a-fA-F]/}"
cipherlist="${cipherlist//, 00,[cC][0-5]/}"
cipherlist="${cipherlist//, 16,[bB][7-9aA]/}"
cipherlist="${cipherlist//, [cC]0,2[3-9a-fA-F]/}"
cipherlist="${cipherlist//, [cC]0,3[01278a-fA-F]/}"
cipherlist="${cipherlist//, [cC]0,[4-9aA][0-9a-fA-F]/}"
cipherlist="${cipherlist//, [cC][cC],1[345]/}"
cipherlist="${cipherlist//, [cC][cC],[aA][89a-eA-E]/}"
fi
echo "$cipherlist"
return 0
}
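# Editor's illustration (cipher list invented for the example): with protocol 03 (TLS 1.2),
#   strip_inconsistent_ciphers 03 ", 13,01, 00,9c, 00,2f"
# drops the TLS 1.3 cipher ", 13,01" and returns ", 00,9c, 00,2f"; with protocol 02 the
# TLS 1.2-only cipher ", 00,9c" would be removed as well.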
2015-06-23 21:54:47 +02:00
newline_to_spaces() {
2015-09-22 15:05:59 +02:00
tr '\n' ' ' <<< "$1" | sed 's/ $//'
2015-06-23 21:54:47 +02:00
}
2015-09-30 14:54:39 +02:00
colon_to_spaces() {
2015-10-01 13:27:14 +02:00
echo "${1//:/ }"
2015-09-30 14:54:39 +02:00
}
2015-08-12 13:58:45 +02:00
strip_lf() {
2016-10-06 18:53:25 +02:00
tr -d '\n' <<< "$1" | tr -d '\r'
2015-08-12 13:58:45 +02:00
}
2015-09-22 15:05:59 +02:00
strip_spaces() {
echo "${1// /}"
}
2017-04-05 14:42:55 +02:00
# https://web.archive.org/web/20121022051228/http://codesnippets.joyent.com/posts/show/1816
strip_leading_space() {
echo "${1#"${1%%[\![:space:]]*}"}"
2016-09-28 20:32:01 +02:00
}
2017-04-05 14:42:55 +02:00
strip_trailing_space() {
echo "${1%"${1##*[![:space:]]}"}"
}
2016-09-28 20:32:01 +02:00
2017-03-19 09:47:49 +01:00
# retrieve cipher from ServerHello (via openssl)
get_cipher() {
awk '/Cipher *:/ { print $3 }' "$1"
#awk '/\<Cipher\>/ && !/Cipher is/ && !/^New/ { print $3 }' "$1"
}
# retrieve protocol from ServerHello (via openssl)
get_protocol() {
awk '/Protocol *:/ { print $3 }' "$1"
}
is_number() {
[[ "$1" =~ ^[1-9][0-9]*$ ]] && \
return 0 || \
return 1
}
is_ipv4addr() {
local octet="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
local ipv4address="$octet\\.$octet\\.$octet\\.$octet"
[[ -z "$1" ]] && return 1
# more than numbers, important for hosts like AAA.BBB.CCC.DDD.in-addr.arpa.DOMAIN.TLS
[[ -n $(tr -d '0-9\.' <<< "$1") ]] && return 1
grep -Eq "$ipv4address" <<< "$1" && \
return 0 || \
return 1
}
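# Editor's illustration (hostnames invented for the example):
#   is_ipv4addr "10.1.2.3"                   # returns 0
#   is_ipv4addr "10.1.2.3.in-addr.arpa.tld"  # returns 1 -- contains more than digits and dots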
# a bit easier
is_ipv6addr() {
[[ -z "$1" ]] && return 1
# less than 2x ":"
[[ $(count_lines "$(tr ':' '\n' <<< "$1")") -le 1 ]] && \
return 1
#check on chars allowed:
[[ -n "$(tr -d '0-9:a-fA-F ' <<< "$1" | sed -e '/^$/d')" ]] && \
return 1
return 0
}
2017-09-25 19:51:10 +02:00
# now some functions for the integrated BIGIP F5 Cookie detector (see https://github.com/drwetter/F5-BIGIP-Decoder)
f5_hex2ip() {
debugme echo "$1"
echo $((16#${1:0:2})).$((16#${1:2:2})).$((16#${1:4:2})).$((16#${1:6:2}))
}
f5_hex2ip6() {
debugme echo "$1"
echo "[${1:0:4}:${1:4:4}:${1:8:4}:${1:12:4}.${1:16:4}:${1:20:4}:${1:24:4}:${1:28:4}]"
}
f5_determine_routeddomain() {
local tmp
tmp="${1%%o*}"
echo "${tmp/rd/}"
}
f5_ip_oldstyle() {
local tmp
local a b c d
tmp="${1/%.*}"                          # until first dot
tmp="$(printf "%08x" "$tmp")"           # convert the whole thing to hex, now back to ip (reversed notation)
tmp="$(f5_hex2ip $tmp)"                 # transform to ip with reversed notation
IFS="." read -r a b c d <<< "$tmp"      # reverse it
echo $d.$c.$b.$a
}
f5_port_decode() {
local tmp
tmp="$(strip_lf "$1")"                  # remove lf if there is one
tmp="${tmp/.0000/}"                     # to be sure remove trailing zeros with a dot
tmp="${tmp#*.}"                         # get the port
tmp="$(printf "%04x" "${tmp}")"         # to hex
if [[ ${#tmp} -eq 4 ]]; then
:
elif [[ ${#tmp} -eq 3 ]]; then          # fill it up with leading zeros if needed
tmp="0${tmp}"
elif [[ ${#tmp} -eq 2 ]]; then
tmp="00${tmp}"
fi
echo $((16#${tmp:2:2}${tmp:0:2}))       # reverse order and convert it from hex to dec
}
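# Editor's illustration (cookie value is the classic one from public F5 documentation, not from this script):
# for a cookie payload "1677787402.36895.0000"
#   f5_ip_oldstyle "1677787402.36895.0000"   # 1677787402 = 0x6401010a -> 10.1.1.100
#   f5_port_decode "1677787402.36895.0000"   # 36895 = 0x901f, byte-swapped 0x1f90 -> 8080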
2017-03-19 09:47:49 +01:00
###### END helper function definitions ######
2017-02-03 13:03:22 +01:00
# prints out multiple lines in $1, left aligned by spaces in $2
out_row_aligned() {
local first=true
2017-03-19 09:47:49 +01:00
while read line; do
2017-02-03 13:03:22 +01:00
"$first" && \
first=false || \
out "$2"
outln "$line"
2017-03-19 09:47:49 +01:00
done <<< "$1"
2017-02-03 13:03:22 +01:00
}
2015-07-21 20:35:49 +02:00
2017-02-09 17:36:24 +01:00
# prints text over multiple lines, trying to make no line longer than $max_width.
2017-03-28 19:54:54 +02:00
# Each line is indented with $spaces.
2017-02-09 17:36:24 +01:00
out_row_aligned_max_width() {
local text="$1"
local spaces="$2"
local -i max_width="$3"
2017-02-24 16:22:59 +01:00
local -i i len
2017-02-09 17:36:24 +01:00
local cr=$'\n'
2017-03-29 10:44:22 +02:00
local line
local first=true
2017-02-09 17:36:24 +01:00
max_width=$max_width-${#spaces}
len=${#text}
while true; do
2017-03-28 19:54:54 +02:00
if [[ $len -lt $max_width ]]; then
# If the remaining text to print is shorter than $max_width,
# then just print it.
2017-02-09 17:36:24 +01:00
i=$len
else
2017-03-28 19:54:54 +02:00
# Find the final space character in the text that is less than
# $max_width characters into the remaining text, and make the
# text up to that space character the next line to print.
line="${text:0:max_width}"
line="${line% *}"
i=${#line}
if [[ $i -eq $max_width ]]; then
# If there are no space characters in the first $max_width
# characters of the remaining text, then make the text up
# to the first space the next line to print. If there are
# no space characters in the remaining text, make the
# remaining text the next line to print.
line="${text#* }"
i=$len-${#line}
[[ $i -eq 0 ]] && i=$len
fi
fi
if ! "$first"; then
tm_out "${cr}${spaces}"
fi
tm_out "${text:0:i}"
[[ $i -eq $len ]] && break
len=$len-$i-1
i=$i+1
text="${text:i:len}"
first=false
[[ $len -eq 0 ]] && break
2017-02-09 17:36:24 +01:00
done
return 0
}
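# Editor's illustration (arguments invented for the example):
#   out_row_aligned_max_width "one two three four" "    " 10
# prints "one" and then "two", "three" and "four" on continuation lines, each prefixed
# with the four-space indent, since only 10-4=6 columns remain per line.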
2017-03-28 19:54:54 +02:00
out_row_aligned_max_width_by_entry() {
local text="$1"
local spaces="$2"
local -i max_width="$3"
local print_function="$4"
local resp entry prev_entry=" "
resp="$(out_row_aligned_max_width "$text" "$spaces" "$max_width")"
while read -d " " entry; do
if [[ -n "$entry" ]]; then
$print_function "$entry"
elif [[ -n "$prev_entry" ]]; then
outln; out " "
fi
out " "
prev_entry="$entry"
done <<< "$resp"
}
2017-04-22 15:39:18 +02:00
# saves $TMPFILE or file supplied in $2 under name "$TEMPDIR/$NODEIP.$1".
# Note: after finishing $TEMPDIR will be removed unless DEBUG >=1
2015-05-17 22:43:53 +02:00
tmpfile_handle() {
2017-04-22 15:39:18 +02:00
local savefile="$2"
[[ -z "$savefile" ]] && savefile=$TMPFILE
2017-05-15 19:47:13 +02:00
#FIXME: make sure/find out if we do not need $TEMPDIR/$NODEIP.$1" if debug=0. We would save fs access here
2017-04-22 15:39:18 +02:00
mv $savefile "$TEMPDIR/$NODEIP.$1" 2>/dev/null
2015-10-11 23:07:16 +02:00
[[ $ERRFILE =~ dev.null ]] && return 0 || \
2017-10-11 16:59:13 +02:00
mv $ERRFILE "$TEMPDIR/$NODEIP.${1//.txt/}.errorlog" 2>/dev/null
2015-05-17 22:43:53 +02:00
}
2015-08-01 23:11:27 +02:00
# arg1: line with comment sign, tabs and so on
filter_input() {
2017-03-19 09:47:49 +01:00
sed -e 's/#.*$//' -e '/^$/d' <<< "$1" | tr -d '\n' | tr -d '\t'
2015-08-01 23:11:27 +02:00
}
2017-02-04 14:13:33 +01:00
# dl's any URL (argv1) via HTTP 1.1 GET from port 80, arg2: file to store http body
# proxy is not honored (see cmd line switches)
http_get() {
2017-02-14 16:18:27 +01:00
local proto z
2017-02-04 14:13:33 +01:00
local node="" query=""
local dl="$2"
local useragent="$UA_STD"
"$SNEAKY" && useragent="$UA_SNEAKY"
2017-09-25 19:51:10 +02:00
IFS=/ read proto z node query <<< "$1"
2017-02-04 14:13:33 +01:00
2017-09-25 19:51:10 +02:00
exec 33<>/dev/tcp/$node/80
printf "GET /$query HTTP/1.1\r\nHost: $node\r\nUser-Agent: $useragent\r\nConnection: Close\r\nAccept: */*\r\n\r\n" >&33
cat <&33 | \
tr -d '\r' | sed '1,/^$/d' >$dl
# HTTP header stripped now, closing fd:
2017-02-04 14:13:33 +01:00
exec 33<&-
[[ -s "$2" ]] && return 0 || return 1
}
# example usage:
# myfile=$(mktemp $TEMPDIR/http_get.XXXXXX.txt)
# http_get "http://crl.startssl.com/sca-server1.crl" "$myfile"
2015-05-17 22:43:53 +02:00
wait_kill() {
2015-09-17 15:30:15 +02:00
local pid=$1       # pid we wait for or kill
local maxsleep=$2  # how long we wait before killing
2015-09-28 22:54:00 +02:00
HAD_SLEPT=0
2015-09-17 15:30:15 +02:00
while true; do
if ! ps $pid >/dev/null; then
return 0           # process terminated before reaching $maxsleep
fi
2015-09-28 22:54:00 +02:00
[[ "$DEBUG" -ge 6 ]] && ps $pid
2015-09-17 15:30:15 +02:00
sleep 1
maxsleep=$((maxsleep - 1))
2015-09-28 22:54:00 +02:00
HAD_SLEPT=$((HAD_SLEPT + 1))
2015-09-17 15:30:15 +02:00
test $maxsleep -le 0 && break
done                         # needs to be killed:
kill $pid >&2 2>/dev/null
wait $pid 2>/dev/null        # make sure pid terminated, see wait(1p)
return 3                     # means killed
2015-05-17 22:43:53 +02:00
}
2016-06-20 21:51:40 +02:00
# parse_date date format input-format
if "$HAS_GNUDATE"; then   # Linux and NetBSD
2017-09-25 19:51:10 +02:00
parse_date() {
LC_ALL=C date -d "$1" "$2"
}
2016-06-20 21:51:40 +02:00
elif "$HAS_FREEBSDDATE"; then   # FreeBSD and OS X
2017-09-25 19:51:10 +02:00
parse_date() {
LC_ALL=C date -j -f "$3" "$2" "$1"
}
2016-06-20 21:51:40 +02:00
else
2017-09-25 19:51:10 +02:00
parse_date() {
LC_ALL=C date -j "$2" "$1"
}
2016-06-20 21:51:40 +02:00
fi
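# Editor's illustration (date string invented for the example): the HTTP "Date:" header is
# converted to epoch seconds further below roughly like
#   parse_date "Wed, 31 May 2017 17:00:00 GMT" "+%s" "%a, %d %b %Y %T %Z"
# where the third argument (the input format) is only evaluated by the BSD/OS X variant.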
2015-05-17 22:43:53 +02:00
2016-09-28 23:15:37 +02:00
# arg1: An ASCII-HEX string
# arg2: file name
# Append $arg1 in binary format to $arg2
asciihex_to_binary_file() {
local string="$1"
local file="$2"
local -i len
local -i i ip2 ip4 ip6 ip8 ip10 ip12 ip14
local -i remainder
len=${#string}
[[ $len%2 -ne 0 ]] && return 1
for (( i=0; i <= len-16 ; i=i+16 )); do
ip2=$i+2; ip4=$i+4; ip6=$i+6; ip8=$i+8; ip10=$i+10; ip12=$i+12; ip14=$i+14
2016-10-11 16:08:59 +02:00
printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}\x${string:ip10:2}\x${string:ip12:2}\x${string:ip14:2}" >> "$file"
2016-09-28 23:15:37 +02:00
done
ip2=$i+2; ip4=$i+4; ip6=$i+6; ip8=$i+8; ip10=$i+10; ip12=$i+12; ip14=$i+14
remainder=$len-$i
case $remainder in
2016-10-11 16:08:59 +02:00
2) printf -- "\x${string:i:2}" >> "$file" ;;
4) printf -- "\x${string:i:2}\x${string:ip2:2}" >> "$file" ;;
6) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}" >> "$file" ;;
8) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}" >> "$file" ;;
10) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}" >> "$file" ;;
12) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}\x${string:ip10:2}" >> "$file" ;;
14) printf -- "\x${string:i:2}\x${string:ip2:2}\x${string:ip4:2}\x${string:ip6:2}\x${string:ip8:2}\x${string:ip10:2}\x${string:ip12:2}" >> "$file" ;;
2016-09-28 23:15:37 +02:00
esac
return 0
}
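# Editor's illustration (file name invented for the example):
#   asciihex_to_binary_file "414243" "$TEMPDIR/example.bin"
# appends the three raw bytes 0x41 0x42 0x43 ("ABC"); strings shorter than 16 hex digits
# are handled entirely by the remainder case branch above.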
Find more extensions in run_server_defaults()
This PR uses `tls_sockets()` to determine whether a server supports certain extensions that may not be supported by `$OPENSSL`. At the moment it checks for max_fragment_length, client_certificate_url, truncated_hmac, ALPN, signed_certificate_timestamp, encrypt_then_mac, and extended_master_secret.
In https://github.com/dcooper16/testssl.sh/blob/extended_tls_sockets/testssl.sh, `run_server_defaults()` is re-written to use `tls_sockets()` instead of `$OPENSSL`, with just one call to `$OPENSSL s_client` to get the session ticket, which reduces the dependence on `$OPENSSL`, but this PR limits the number of calls to `tls_sockets()`, which is still slow.
Note: I included ALPN in the `tls_sockets()` ClientHello since a single call to `tls_sockets()` cannot test for both NPN and ALPN, and since support for NPN was added to OpenSSL before support for ALPN was added, I figured it was more likely that `determine_tls_extensions()` had already determined whether the server supported NPN.
2016-11-08 18:36:25 +01:00
# arg1: text string
# Output a comma-separated ASCII-HEX string representation of the input string.
string_to_asciihex() {
local string="$1"
local -i i eos
local output=""
eos=${#string}-1
for (( i=0; i<eos; i++ )); do
output+="$(printf "%02x," "'${string:i:1}")"
done
[[ -n "$string" ]] && output+="$(printf "%02x" "'${string:eos:1}")"
2017-02-25 16:31:30 +01:00
tm_out "$output"
2016-11-08 18:36:25 +01:00
return 0
2017-01-05 20:20:19 +01:00
2016-11-08 18:36:25 +01:00
}
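# Editor's illustration:
#   string_to_asciihex "h2"   # -> "68,32"
# i.e. the ASCII codes of 'h' and '2', comma-separated.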
2017-07-03 22:24:02 +02:00
# Adjust options to $OPENSSL s_client based on OpenSSL version and protocol version
s_client_options() {
2017-11-02 16:28:09 +01:00
local options="$1"
2017-09-19 23:16:41 +02:00
local ciphers
2017-07-03 22:24:02 +02:00
# Don't include the -servername option for an SSLv2 or SSLv3 ClientHello.
2017-10-11 16:59:13 +02:00
[[ -n "$SNI" ]] && [[ " $options " =~ \ -ssl[2|3]\  ]] && options="${options//$SNI/}"
2017-07-03 22:24:02 +02:00
# The server_name extension should not be included in the ClientHello unless
# the -servername option is provided. However, OpenSSL 1.1.1 will include the
# server_name extension unless the -noservername option is provided. So, if
# the command line doesn't include -servername and the -noservername option is
# supported, then add -noservername to the options.
"$HAS_NOSERVERNAME" && [[ ! " $options " =~ " -servername " ]] && options+=" -noservername"
# Newer versions of OpenSSL have dropped support for the -no_ssl2 option, so
# remove any -no_ssl2 option if the option isn't supported. (Since versions of
# OpenSSL that don't support -no_ssl2 also don't support SSLv2, the option
# isn't needed for these versions of OpenSSL.)
2017-10-11 16:59:13 +02:00
! "$HAS_NO_SSL2" && options="${options//-no_ssl2/}"
2017-07-03 22:24:02 +02:00
2017-09-19 23:16:41 +02:00
# If $OPENSSL is compiled with TLSv1.3 support and s_client is called without
# specifying a protocol, but specifying a list of ciphers that doesn't include
# any TLSv1.3 ciphers, then the command will always fail. So, if $OPENSSL supports
# TLSv1.3 and a cipher list is provided, but no protocol is specified, then add
# -no_tls1_3 if the list of ciphers doesn't include any TLSv1.3 ciphers.
if "$HAS_TLS13" && [[ " $options " =~ " -cipher " ]] && \
[[ ! " $options " =~ \ -ssl[2|3]\  ]] && \
[[ ! " $options " =~ \ -tls1\  ]] && [[ ! " $options " =~ \ -tls1_[1|2|3]\  ]]; then
ciphers="${options#* -cipher }"
ciphers="${ciphers%% *}"
2017-10-27 19:07:04 +02:00
[[ ! "$($OPENSSL ciphers "$ciphers" 2>/dev/null)" =~ TLS13 ]] && options+=" -no_tls1_3"
2017-09-19 23:16:41 +02:00
fi
2017-07-03 22:24:02 +02:00
tm_out "$options"
}
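# Editor's sketch of the behaviour (option string invented for the example):
#   s_client_options "-no_ssl2 -connect $NODEIP:$PORT $SNI"
# returns the string with "-no_ssl2" stripped on OpenSSL builds lacking that flag, and with
# " -noservername" appended when the build knows -noservername and no -servername was given.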
2015-05-17 22:43:53 +02:00
###### check code starts here ######
# determines whether the port has an HTTP service running or not (plain TLS, no STARTTLS)
# arg1 could be the protocol determined as "working". IIS6 needs that
2016-10-11 22:30:30 +02:00
service_detection() {
2015-10-05 09:56:21 +02:00
local -i ret=0
2015-10-11 23:07:16 +02:00
local -i was_killed
2015-10-05 09:56:21 +02:00
2017-06-12 18:23:55 +02:00
if ! "$CLIENT_AUTH"; then
2017-05-15 19:47:13 +02:00
# SNI is not standardized for !HTTPS but fortunately for other protocols s_client doesn't seem to care
2017-07-03 22:24:02 +02:00
printf "$GET_REQ11" | $OPENSSL s_client $(s_client_options "$1 -quiet $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE &
2015-10-11 23:07:16 +02:00
wait_kill $! $HEADER_MAXSLEEP
was_killed=$?
2017-05-15 19:47:13 +02:00
head $TMPFILE | grep -aq '^HTTP\/' && SERVICE=HTTP
2017-07-30 22:46:17 +02:00
[[ -z "$SERVICE" ]] && head $TMPFILE | egrep -waq "SMTP|ESMTP|Exim|IdeaSmtpServer|Kerio Connect|Postfix" && SERVICE=SMTP   # I know some overlap here
[[ -z "$SERVICE" ]] && head $TMPFILE | egrep -waq "POP|Gpop|MailEnable POP3 Server|OK Dovecot|Cyrus POP3" && SERVICE=POP   # I know some overlap here
[[ -z "$SERVICE" ]] && head $TMPFILE | egrep -waq "IMAP|IMAP4|Cyrus IMAP4IMAP4rev1|IMAP4REV1|Gimap" && SERVICE=IMAP        # I know some overlap here
[[ -z "$SERVICE" ]] && head $TMPFILE | grep -aq FTP && SERVICE=FTP
[[ -z "$SERVICE" ]] && head $TMPFILE | egrep -aqi "jabber|xmpp" && SERVICE=XMPP
2017-05-15 19:47:13 +02:00
[[ -z "$SERVICE" ]] && head $TMPFILE | egrep -aqw "Jive News|InterNetNews|NNRP|INN" && SERVICE=NNTP
2017-09-27 00:57:28 +02:00
# MongoDB port 27017 will respond to a GET request with a mocked HTTP response
[[ "$SERVICE" == HTTP ]] && head $TMPFILE | egrep -aqw "MongoDB" && SERVICE=MongoDB
2017-05-15 19:47:13 +02:00
debugme head -50 $TMPFILE | sed -e '/<HTML>/,$d' -e '/<html>/,$d' -e '/<XML/,$d' -e '/<xml/,$d' -e '/<\?XML/,$d' -e '/<\?xml/,$d' -e '/<\!DOCTYPE/,$d' -e '/<\!doctype/,$d'
2015-10-11 23:07:16 +02:00
fi
2015-05-17 22:43:53 +02:00
2015-10-05 09:56:21 +02:00
out " Service detected: $CORRECT_SPACES"
2015-09-17 15:30:15 +02:00
case $SERVICE in
HTTP)
out " $SERVICE"
2016-01-23 23:33:17 +01:00
fileout "service" "INFO" "Service detected: $SERVICE"
2017-03-18 22:24:35 +01:00
ret=0
;;
2017-09-27 00:57:28 +02:00
IMAP|POP|SMTP|NNTP|MongoDB)
2015-09-17 15:30:15 +02:00
out " $SERVICE, thus skipping HTTP specific checks"
2016-01-23 23:33:17 +01:00
fileout "service" "INFO" "Service detected: $SERVICE, thus skipping HTTP specific checks"
2017-03-18 22:24:35 +01:00
ret=0
;;
2017-06-12 18:23:55 +02:00
*)   if "$CLIENT_AUTH"; then
2015-10-11 23:07:16 +02:00
out "certificate based authentication => skipping all HTTP checks"
echo "certificate based authentication => skipping all HTTP checks" >$TMPFILE
2016-06-07 23:06:58 +02:00
fileout "client_auth" "INFO" "certificate based authentication => skipping all HTTP checks"
2015-09-17 15:30:15 +02:00
else
2015-10-11 23:07:16 +02:00
out " Couldn't determine what's running on port $PORT"
2016-10-11 22:30:30 +02:00
if "$ASSUME_HTTP"; then
2015-10-11 23:07:16 +02:00
SERVICE=HTTP
2016-10-11 22:30:30 +02:00
out " -- ASSUME_HTTP set though"
fileout "service" "DEBUG" "Couldn't determine service, --ASSUME_HTTP set"
2015-10-11 23:07:16 +02:00
ret=0
else
out ", assuming no HTTP service => skipping all HTTP checks"
2016-06-07 23:06:58 +02:00
fileout "service" "DEBUG" "Couldn't determine service, skipping all HTTP checks"
2015-10-11 23:07:16 +02:00
ret=1
fi
2015-09-17 15:30:15 +02:00
fi
;;
esac
2015-11-03 10:30:59 +01:00
outln "\n"
2015-09-17 15:30:15 +02:00
tmpfile_handle $FUNCNAME.txt
return $ret
2015-05-17 22:43:53 +02:00
}
2015-10-11 23:07:16 +02:00
2015-05-17 22:43:53 +02:00
#problems not handled: chunked
2015-07-22 13:11:20 +02:00
run_http_header() {
2017-07-03 22:24:02 +02:00
local header
2015-09-17 15:30:15 +02:00
local -i ret
local referer useragent
2015-12-22 21:08:52 +01:00
local url redirect
2015-09-17 15:30:15 +02:00
2016-01-31 21:02:18 +01:00
HEADERFILE=$TEMPDIR/$NODEIP.http_header.txt
2015-10-15 14:15:07 +02:00
outln; pr_headlineln " Testing HTTP header response @ \"$URL_PATH\" "
outln
2015-09-17 15:30:15 +02:00
[[ -z "$1" ]] && url="/" || url="$1"
2017-07-03 22:24:02 +02:00
printf "$GET_REQ11" | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -quiet -ign_eof -connect $NODEIP:$PORT $PROXY $SNI") >$HEADERFILE 2>$ERRFILE &
2015-09-28 22:54:00 +02:00
wait_kill $! $HEADER_MAXSLEEP
if [[ $? -eq 0 ]]; then
# we do the get command again as it terminated within $HEADER_MAXSLEEP. Thus it didn't hang, we do it
2016-07-26 18:07:08 +02:00
# again in the foreground to get an accurate header time!
2017-07-03 22:24:02 +02:00
printf "$GET_REQ11" | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -quiet -ign_eof -connect $NODEIP:$PORT $PROXY $SNI") >$HEADERFILE 2>$ERRFILE
2015-09-28 22:54:00 +02:00
NOW_TIME=$(date "+%s")
HTTP_TIME=$(awk -F': ' '/^date:/ { print $2 } /^Date:/ { print $2 }' $HEADERFILE)
HAD_SLEPT=0
2015-09-17 15:30:15 +02:00
else
2015-09-28 22:54:00 +02:00
# GET request needed to be killed before; try whether it succeeded:
if egrep -iaq "XML|HTML|DOCTYPE|HTTP|Connection" $HEADERFILE; then
NOW_TIME=$(($(date "+%s") - HAD_SLEPT))   # correct by seconds we slept
HTTP_TIME=$(awk -F': ' '/^date:/ { print $2 } /^Date:/ { print $2 }' $HEADERFILE)
else
2016-03-05 21:07:49 +01:00
pr_warning " likely HTTP header requests failed (#lines: $(wc -l < $HEADERFILE | sed 's/ //g'))."
2015-09-17 15:30:15 +02:00
outln "Rerun with DEBUG=1 and inspect \"run_http_header.txt\"\n"
debugme cat $HEADERFILE
2015-09-28 22:54:00 +02:00
return 7
2015-09-17 15:30:15 +02:00
fi
fi
2015-09-28 22:54:00 +02:00
# populate vars for HTTP time
debugme echo "$NOW_TIME: $HTTP_TIME"
2016-10-28 15:30:07 +02:00
2016-01-31 21:02:18 +01:00
# delete from pattern til the end. We ignore any leading spaces (e.g. www.amazon.de)
2017-05-15 19:47:13 +02:00
sed -e '/<HTML>/,$d' -e '/<html>/,$d' -e '/<XML/,$d' -e '/<xml/,$d' \
-e '/<\?XML/,$d' -e '/<?xml/,$d' -e '/<\!DOCTYPE/,$d' -e '/<\!doctype/,$d' $HEADERFILE >$HEADERFILE.2
2015-09-28 22:54:00 +02:00
#### ^^^ Attention: the filtering for the html body only as of now, doesn't work for other content yet
mv $HEADERFILE.2 $HEADERFILE   # sed'ing in place doesn't work with BSD and Linux simultaneously
ret=0
2016-09-21 20:32:04 +02:00
HTTP_STATUS_CODE=$(awk '/^HTTP\// { print $2 }' $HEADERFILE 2>>$ERRFILE)
msg_thereafter=$(awk -F"$HTTP_STATUS_CODE" '/^HTTP\// { print $2 }' $HEADERFILE 2>>$ERRFILE)   # dirty trick to use the status code as a
2015-09-17 15:30:15 +02:00
msg_thereafter=$(strip_lf "$msg_thereafter")                                                   # field separator, otherwise we need a loop with awk
2016-09-21 20:32:04 +02:00
debugme echo "Status/MSG: $HTTP_STATUS_CODE $msg_thereafter"
2015-09-17 15:30:15 +02:00
pr_bold " HTTP Status Code "
2016-09-21 20:32:04 +02:00
[[ -z "$HTTP_STATUS_CODE" ]] && pr_cyan "No status code" && return 3
2015-09-17 15:30:15 +02:00
2016-09-21 20:32:04 +02:00
out " $HTTP_STATUS_CODE $msg_thereafter"
case $HTTP_STATUS_CODE in
2016-01-23 19:18:33 +01:00
301|302|307|308)
2015-12-22 20:31:52 +01:00
redirect=$(grep -a '^Location' $HEADERFILE | sed 's/Location: //' | tr -d '\r\n')
2017-02-17 22:40:50 +01:00
out ", redirecting to \""; pr_url "$redirect"; out "\""
2016-02-22 10:44:43 +01:00
if [[ $redirect == "http://"* ]]; then
2016-03-01 20:25:41 +01:00
pr_svrty_high " -- Redirect to insecure URL (NOT ok)"
2017-09-18 18:18:05 +02:00
fileout "insecure_redirect" "HIGH" "Redirect to insecure URL: \"$redirect\""
2015-12-21 20:59:40 +01:00
fi
2017-09-18 18:18:05 +02:00
fileout "HTTP_STATUS_CODE" "INFO" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested)"
2016-01-23 19:18:33 +01:00
;;
2017-09-18 18:18:05 +02:00
200|204|403|405)
fileout "HTTP_STATUS_CODE" "INFO" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested)"
2016-03-12 17:08:43 +01:00
;;
2016-01-23 19:18:33 +01:00
206)
2017-09-18 18:18:05 +02:00
out " -- WHAT?"
fileout "HTTP_STATUS_CODE" "INFO" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested) -- WHAT?"
# partial content shouldn't happen
2016-01-23 19:18:33 +01:00
;;
400)
2016-03-05 21:07:49 +01:00
pr_cyan " (Hint: better try another URL)"
2017-09-18 18:18:05 +02:00
fileout "HTTP_STATUS_CODE" "INFO" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested) -- better try another URL"
2015-12-21 20:59:40 +01:00
;;
2016-01-23 19:18:33 +01:00
401)
2017-02-10 16:59:20 +01:00
grep -aq "^WWW-Authenticate" $HEADERFILE && out " "; out "$(strip_lf "$(grep -a "^WWW-Authenticate" $HEADERFILE)")"
2017-09-18 18:18:05 +02:00
fileout "HTTP_STATUS_CODE" "INFO" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested) -- $(grep -a "^WWW-Authenticate" $HEADERFILE)"
2016-01-23 19:18:33 +01:00
;;
404)
2016-09-21 20:32:04 +02:00
out " (Hint: supply a path which doesn't give a \"$HTTP_STATUS_CODE $msg_thereafter\")"
2017-09-18 18:18:05 +02:00
fileout "HTTP_STATUS_CODE" "INFO" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested) -- better supply a path which doesn't give a \"$HTTP_STATUS_CODE $msg_thereafter\""
2016-01-23 19:18:33 +01:00
;;
*)
2016-09-21 20:32:04 +02:00
pr_warning ". Oh, didn't expect \"$HTTP_STATUS_CODE $msg_thereafter\""
2017-10-31 12:23:16 +01:00
fileout "HTTP_STATUS_CODE" "WARN" "$HTTP_STATUS_CODE $msg_thereafter (\"$URL_PATH\" tested) -- Oops, didn't expect a \"$HTTP_STATUS_CODE $msg_thereafter\""
2015-09-17 15:30:15 +02:00
;;
esac
outln
# we don't call "tmpfile_handle $FUNCNAME.txt" as we need the header file in other functions!
return $ret
2015-05-17 22:43:53 +02:00
}
2015-08-21 10:47:29 +02:00
# Borrowed from Glenn Jackman, see https://unix.stackexchange.com/users/4667/glenn-jackman
2015-06-19 20:36:32 +02:00
detect_ipv4( ) {
2015-09-17 15:30:15 +02:00
local octet="(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])"
local ipv4address="$octet\\.$octet\\.$octet\\.$octet"
2017-08-01 15:37:40 +02:00
local whitelisted_header="pagespeed|page-speed|^Content-Security-Policy|^MicrosoftSharePointTeamServices|^X-OWA-Version|^Location|^Server: PRTG"
2015-09-17 15:30:15 +02:00
local your_ip_msg="(check if it's your IP address or e.g. a cluster IP)"
local result
local first=true
local spaces=" "
2016-01-23 19:18:33 +01:00
local count
2015-09-17 15:30:15 +02:00
if [ [ ! -s $HEADERFILE ] ] ; then
2015-09-29 18:47:49 +02:00
run_http_header " $1 " || return 3
2015-09-17 15:30:15 +02:00
fi
2016-03-19 17:20:36 +01:00
# white list some headers as they are mistakenly identified as ipv4 address. Issues 158, 323,o facebook has a CSP rule for 127.0.0.1
if egrep -vi " $whitelisted_header " $HEADERFILE | grep -iqE " $ipv4address " ; then
2016-01-23 19:18:33 +01:00
pr_bold " IPv4 address in header "
count = 0
2015-09-17 15:30:15 +02:00
while read line; do
result = " $( grep -E " $ipv4address " <<< " $line " ) "
result = $( strip_lf " $result " )
if [ [ -n " $result " ] ] ; then
if ! $first ; then
out " $spaces "
your_ip_msg = ""
else
first = false
fi
2017-09-25 19:51:10 +02:00
pr_svrty_medium " $result "
2016-01-31 21:02:18 +01:00
outln " \n $spaces $your_ip_msg "
2017-09-25 19:51:10 +02:00
fileout " ip_in_header_ $count " "MEDIUM" " IPv4 address in header $result $your_ip_msg "
2015-09-17 15:30:15 +02:00
fi
2016-01-23 19:18:33 +01:00
count = $count +1
2015-09-17 15:30:15 +02:00
done < $HEADERFILE
fi
2016-01-23 19:18:33 +01:00
}
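# Example of what detect_ipv4() flags (illustrative, hypothetical header): a response containing
#    X-Forwarded-For: 203.0.113.7
# matches $ipv4address and is not covered by $whitelisted_header, so it is reported as MEDIUM --
# whereas e.g. a "Content-Security-Policy: ... 127.0.0.1 ..." line is whitelisted and ignored.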
2015-06-19 20:36:32 +02:00
2015-08-28 00:15:51 +02:00
run_http_date() {
     local now difftime

     if [[ ! -s $HEADERFILE ]]; then
          run_http_header "$1" || return 3              # this is just for the line "Testing HTTP header response"
     fi
     pr_bold " HTTP clock skew            "
     if [[ $SERVICE != "HTTP" ]]; then
          out "not tested as we're not targeting HTTP"
     else
          if [[ -n "$HTTP_TIME" ]]; then
               HTTP_TIME=$(parse_date "$HTTP_TIME" "+%s" "%a, %d %b %Y %T %Z" 2>>$ERRFILE)       # the trailing \r confuses BSD flavors otherwise
               difftime=$((HTTP_TIME - NOW_TIME))
               [[ $difftime != "-"* ]] && [[ $difftime != "0" ]] && difftime="+$difftime"
               # process was killed, so we need to add an error:
               [[ $HAD_SLEPT -ne 0 ]] && difftime="$difftime (± 1.5)"
               out "$difftime sec from localtime";
               fileout "http_clock_skew" "INFO" "HTTP clock skew $difftime sec from localtime"
          else
               out "Got no HTTP time, maybe try different URL?";
               fileout "http_clock_skew" "INFO" "HTTP clock skew not measured. Got no HTTP time, maybe try different URL?"
          fi
          debugme tm_out ", epoch: $HTTP_TIME"
     fi
     outln
     detect_ipv4
}
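# Sketch of the clock skew math above (illustrative numbers): if the server sent
#    Date: Tue, 10 Oct 2017 12:00:05 GMT
# parse_date() turns that into epoch 1507636805; with a local $NOW_TIME of 1507636800
# difftime becomes "+5" and the output reads "+5 sec from localtime".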
2015-06-19 20:36:32 +02:00
2016-10-03 18:52:48 +02:00
2016-10-01 22:25:14 +02:00
# HEADERFILE needs to contain the HTTP header (made sure by invoker)
# arg1: key=word to match
# arg2: hint for fileout() if double header
# arg3: indentation, i.e. string w/ spaces
# returns:
#    0    if header not found
#    1-n  nr of headers found, then HEADERVALUE contains the value of the first one
detect_header() {
     local key="$1"
     local spaces="$3"
     local -i nr=0

     nr=$(grep -Faciw "$key:" $HEADERFILE)
     if [[ $nr -eq 0 ]]; then
          HEADERVALUE=""
          return 0
     elif [[ $nr -eq 1 ]]; then
          HEADERVALUE=$(grep -Faiw "$key:" $HEADERFILE)
          HEADERVALUE=${HEADERVALUE#*:}                 # remove the leading key up to the colon
          HEADERVALUE="$(strip_leading_space "$HEADERVALUE")"
          return 1
     else
          pr_svrty_medium "misconfiguration: "
          pr_italic "$key"
          pr_svrty_medium " ${nr}x"
          out " -- checking first one only"
          out "\n$spaces"
          HEADERVALUE=$(grep -Faiw "$key:" $HEADERFILE | head -1)
          HEADERVALUE=${HEADERVALUE#*:}
          HEADERVALUE="$(strip_leading_space "$HEADERVALUE")"
          [[ $DEBUG -ge 2 ]] && tm_italic "$HEADERVALUE" && tm_out "\n$spaces"
          fileout "${2}_multiple" "MEDIUM" "Multiple $2 headers. Using first header: $HEADERVALUE"
          return $nr
     fi
}
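# Illustrative usage of detect_header() (this is how run_hsts() / run_more_flags() below call it):
#    detect_header "X-Frame-Options" "X-Frame-Options" "$spaces"
#    nr=$?       # 0 = header absent, 1 = single header (value now in $HEADERVALUE), >1 = duplicated header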
2015-05-17 22:43:53 +02:00
includeSubDomains() {
     if grep -aiqw includeSubDomains "$1"; then
          pr_done_good ", includeSubDomains"
          return 0
     else
          pr_litecyan ", just this domain"
          return 1
     fi
}

preload() {
     if grep -aiqw preload "$1"; then
          pr_done_good ", preload"
          return 0
     else
          return 1
     fi
}
2015-06-19 20:36:32 +02:00
2015-07-22 13:11:20 +02:00
run_hsts() {
     local hsts_age_sec
     local hsts_age_days
     local spaces="                              "

     if [[ ! -s $HEADERFILE ]]; then
          run_http_header "$1" || return 3
     fi
     pr_bold " Strict Transport Security  "
     detect_header "Strict-Transport-Security" "HSTS" "$spaces"
     if [[ $? -ne 0 ]]; then
          echo "$HEADERVALUE" >$TMPFILE
          hsts_age_sec=$(sed -e 's/[^0-9]*//g' <<< $HEADERVALUE)
          debugme echo "hsts_age_sec: $hsts_age_sec"
          if [[ -n $hsts_age_sec ]]; then
               hsts_age_days=$((hsts_age_sec / 86400))
          else
               hsts_age_days=-1
          fi
          if [[ $hsts_age_days -eq -1 ]]; then
               pr_svrty_medium "HSTS max-age is required but missing. Setting 15552000 s (180 days) or more is recommended"
               fileout "hsts_time" "MEDIUM" "HSTS max-age missing. 15552000 s (180 days) or more recommended"
          elif [[ $hsts_age_sec -eq 0 ]]; then
               pr_svrty_medium "HSTS max-age is set to 0. HSTS is disabled"
               fileout "hsts_time" "MEDIUM" "HSTS max-age set to 0. HSTS is disabled"
          elif [[ $hsts_age_sec -gt $HSTS_MIN ]]; then
               pr_done_good "$hsts_age_days days"; out " = $hsts_age_sec s"
               fileout "hsts_time" "OK" "HSTS timeout $hsts_age_days days (=$hsts_age_sec seconds) > $HSTS_MIN days"
          else
               pr_svrty_medium "$hsts_age_sec s = $hsts_age_days days is too short (>= $HSTS_MIN s recommended)"
               fileout "hsts_time" "MEDIUM" "HSTS timeout too short. $hsts_age_days days (=$hsts_age_sec seconds) < $HSTS_MIN days"
          fi
          if includeSubDomains "$TMPFILE"; then
               fileout "hsts_subdomains" "OK" "HSTS includes subdomains"
          else
               fileout "hsts_subdomains" "INFO" "HSTS only for this domain"
          fi
          if preload "$TMPFILE"; then
               fileout "hsts_preload" "OK" "HSTS domain is marked for preloading"
          else
               fileout "hsts_preload" "INFO" "HSTS domain is NOT marked for preloading"
               #FIXME: To be checked against preloading lists,
               # e.g. https://dxr.mozilla.org/mozilla-central/source/security/manager/boot/src/nsSTSPreloadList.inc
               #      https://chromium.googlesource.com/chromium/src/+/master/net/http/transport_security_state_static.json
          fi
     else
          out "--"
          fileout "hsts" "HIGH" "No support for HTTP Strict Transport Security"
     fi
     outln
     tmpfile_handle $FUNCNAME.txt
     return $?
}
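# For reference (illustrative, not server output): with the default $HSTS_MIN a header like
#    Strict-Transport-Security: max-age=31536000; includeSubDomains; preload
# gets the good rating above -- 31536000 s = 365 days, and includeSubDomains() / preload() both return 0.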
2015-07-21 20:35:49 +02:00
2015-07-22 13:11:20 +02:00
run_hpkp( ) {
2015-09-17 15:30:15 +02:00
local -i hpkp_age_sec
local -i hpkp_age_days
local -i hpkp_nr_keys
2016-10-06 18:53:25 +02:00
local hpkp_spki hpkp_spki_hostcert
local -a backup_spki
2015-09-17 15:30:15 +02:00
local spaces = " "
2016-10-06 18:53:25 +02:00
local spaces_indented = " "
local certificate_found = false
2015-09-17 15:30:15 +02:00
local i
2016-01-23 19:18:33 +01:00
local hpkp_headers
local first_hpkp_header
2016-10-06 18:53:25 +02:00
local spki
2016-10-27 21:59:10 +02:00
local ca_hashes = " $TESTSSL_INSTALL_DIR /etc/ca_hashes.txt "
2015-09-17 15:30:15 +02:00
if [ [ ! -s $HEADERFILE ] ] ; then
2015-09-29 18:47:49 +02:00
run_http_header " $1 " || return 3
2015-09-17 15:30:15 +02:00
fi
pr_bold " Public Key Pinning "
egrep -aiw '^Public-Key-Pins|Public-Key-Pins-Report-Only' $HEADERFILE >$TMPFILE
if [ [ $? -eq 0 ] ] ; then
if egrep -aciw '^Public-Key-Pins|Public-Key-Pins-Report-Only' $HEADERFILE | egrep -waq "1" ; then
:
else
2016-01-23 19:18:33 +01:00
hpkp_headers = ""
2017-06-07 09:54:24 +02:00
pr_svrty_medium "misconfiguration, multiple HPKP headers: "
2016-02-01 13:23:28 +01:00
# https://scotthelme.co.uk is a candidate
#FIXME: should display both Public-Key-Pins+Public-Key-Pins-Report-Only --> egrep -ai -w
2015-09-17 15:30:15 +02:00
for i in $( newline_to_spaces " $( egrep -ai '^Public-Key-Pins' $HEADERFILE | awk -F':' '/Public-Key-Pins/ { print $1 }' ) " ) ; do
2015-10-15 14:15:07 +02:00
pr_italic $i
2016-01-23 19:18:33 +01:00
hpkp_headers = " $hpkp_headers $i "
2015-09-17 15:30:15 +02:00
out " "
done
2017-06-07 09:54:24 +02:00
out " \n $spaces Examining first: "
2016-01-23 19:18:33 +01:00
first_hpkp_header = $( awk -F':' '/Public-Key-Pins/ { print $1 }' $HEADERFILE | head -1)
pr_italic " $first_hpkp_header , "
2016-10-06 18:53:25 +02:00
fileout "hpkp_multiple" "WARN" " Multiple HPKP headers $hpkp_headers . Using first header: $first_hpkp_header "
2015-09-17 15:30:15 +02:00
fi
# remove leading Public-Key-Pins*, any colons, double quotes and trailing spaces and taking the first -- whatever that is
sed -e 's/Public-Key-Pins://g' -e s'/Public-Key-Pins-Report-Only://' $TMPFILE | \
sed -e 's/;//g' -e 's/\"//g' -e 's/^ //' | head -1 > $TMPFILE .2
# BSD lacks -i, otherwise we would have done it inline
# now separate key value and other stuff per line:
tr ' ' '\n' < $TMPFILE .2 >$TMPFILE
hpkp_nr_keys = $( grep -ac pin-sha $TMPFILE )
if [ [ $hpkp_nr_keys -eq 1 ] ] ; then
2016-10-06 18:53:25 +02:00
pr_svrty_high "1 key (NOT ok), "
fileout "hpkp_spkis" "HIGH" "Only one key pinned in HPKP header, this means the site may become unavailable if the key is revoked"
2015-09-17 15:30:15 +02:00
else
2016-10-06 18:53:25 +02:00
pr_done_good " $hpkp_nr_keys "
out " keys, "
fileout "hpkp_spkis" "OK" " $hpkp_nr_keys keys pinned in HPKP header, additional keys are available if the current key is revoked "
2015-09-17 15:30:15 +02:00
fi
# print key=value pair with awk, then strip non-numbers, to be improved with proper parsing of key-value with awk
2017-07-26 09:55:49 +02:00
if " $HAS_SED_E " ; then
hpkp_age_sec = $( awk -F= '/max-age/{max_age=$2; print max_age}' $TMPFILE | sed -E 's/[^[:digit:]]//g' )
else
hpkp_age_sec = $( awk -F= '/max-age/{max_age=$2; print max_age}' $TMPFILE | sed -r 's/[^[:digit:]]//g' )
fi
2015-09-17 15:30:15 +02:00
hpkp_age_days = $(( hpkp_age_sec / 86400 ))
2016-09-01 19:09:12 +02:00
if [ [ $hpkp_age_sec -ge $HPKP_MIN ] ] ; then
2016-03-01 20:36:41 +01:00
pr_done_good " $hpkp_age_days days " ; out " = $hpkp_age_sec s "
2016-01-23 23:33:17 +01:00
fileout "hpkp_age" "OK" " HPKP age is set to $hpkp_age_days days ( $hpkp_age_sec sec) "
2015-09-17 15:30:15 +02:00
else
out " $hpkp_age_sec s = "
2017-08-01 21:42:33 +02:00
pr_svrty_medium " $hpkp_age_days days (< $HPKP_MIN s = $(( HPKP_MIN / 86400 )) days is not good enough) "
fileout "hpkp_age" "MEDIUM" " HPKP age is set to $hpkp_age_days days ( $hpkp_age_sec sec) < $HPKP_MIN s = $(( HPKP_MIN / 86400 )) days is not good enough. "
2015-09-17 15:30:15 +02:00
fi
2016-01-23 19:18:33 +01:00
if includeSubDomains " $TMPFILE " ; then
2016-01-23 23:33:17 +01:00
fileout "hpkp_subdomains" "INFO" "HPKP header is valid for subdomains as well"
2016-01-23 19:18:33 +01:00
else
2016-01-23 23:33:17 +01:00
fileout "hpkp_subdomains" "INFO" "HPKP header is valid for this domain only"
2016-01-23 19:18:33 +01:00
fi
if preload " $TMPFILE " ; then
2016-01-23 23:33:17 +01:00
fileout "hpkp_preload" "INFO" "HPKP header is marked for browser preloading"
2016-01-23 19:18:33 +01:00
else
2016-01-23 23:33:17 +01:00
fileout "hpkp_preload" "INFO" "HPKP header is NOT marked for browser preloading"
2016-01-23 19:18:33 +01:00
fi
2015-09-17 15:30:15 +02:00
2016-10-06 18:53:25 +02:00
# Get the SPKIs first
2016-09-28 20:32:01 +02:00
spki = $( tr ';' '\n' < $TMPFILE | tr -d ' ' | tr -d '\"' | awk -F'=' '/pin.*=/ { print $2 }' )
2017-02-25 16:31:30 +01:00
debugme tmln_out " \n $spki "
2016-07-05 18:10:36 +02:00
# Look at the host certificate first
2016-02-20 11:07:47 +01:00
# get the key fingerprint from the host certificate
if [ [ ! -s " $HOSTCERT " ] ] ; then
get_host_cert || return 1
fi
2016-07-05 18:10:36 +02:00
2017-10-27 19:07:04 +02:00
hpkp_spki_hostcert = " $( $OPENSSL x509 -in $HOSTCERT -pubkey -noout 2>/dev/null | grep -v PUBLIC | \
$OPENSSL base64 -d 2>/dev/null | $OPENSSL dgst -sha256 -binary 2>/dev/null | $OPENSSL base64 2>/dev/null) "
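     # What the pipe above computes (for reference): the HPKP pin is base64( SHA-256( DER-encoded
     # SubjectPublicKeyInfo ) ), i.e. roughly the same as
     #    openssl x509 -in cert.pem -pubkey -noout | openssl pkey -pubin -outform DER | openssl dgst -sha256 -binary | openssl base64
     # -- here the PEM armor is stripped with grep + "base64 -d" instead of "pkey -outform DER".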
hpkp_ca = " $( $OPENSSL x509 -in $HOSTCERT -issuer -noout 2>/dev/null | sed 's/^.*CN=//' | sed 's/\/.*$//' ) "
2016-07-05 18:10:36 +02:00
# Get keys/hashes from intermediate certificates
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS $BUGS $PROXY -showcerts -connect $NODEIP : $PORT $SNI " ) </dev/null >$TMPFILE 2>$ERRFILE
2016-07-05 18:10:36 +02:00
# Place the server's certificate in $HOSTCERT and any intermediate
# certificates that were provided in $TEMPDIR/intermediatecerts.pem
# http://backreference.org/2010/05/09/ocsp-verification-with-openssl/
awk -v n = -1 " /Certificate chain/ {start=1}
2016-11-04 02:54:56 +01:00
/-----BEGIN CERTIFICATE-----/{ if ( start) { inc = 1; n++} }
2016-07-05 18:10:36 +02:00
inc { print > ( \" $TEMPDIR /level\" n \" .crt\" ) }
/---END CERTIFICATE-----/{ inc = 0 } " $TMPFILE
nrsaved = $( count_words " $( echo $TEMPDIR /level?.crt 2>/dev/null) " )
2016-07-20 19:01:02 +02:00
rm $TEMPDIR /level0.crt 2>/dev/null
2016-07-05 18:10:36 +02:00
2016-10-06 18:53:25 +02:00
printf "" > " $TEMPDIR /intermediate.hashes "
2016-07-05 18:10:36 +02:00
if [ [ nrsaved -ge 2 ] ] ; then
for cert_fname in $TEMPDIR /level?.crt; do
2017-10-27 19:07:04 +02:00
hpkp_spki_ca = " $( $OPENSSL x509 -in " $cert_fname " -pubkey -noout 2>/dev/null | grep -v PUBLIC | $OPENSSL base64 -d 2>/dev/null |
$OPENSSL dgst -sha256 -binary 2>/dev/null | $OPENSSL enc -base64 2>/dev/null) "
2016-07-05 18:10:36 +02:00
hpkp_name = " $( get_cn_from_cert $cert_fname ) "
2017-10-27 19:07:04 +02:00
hpkp_ca = " $( $OPENSSL x509 -in $cert_fname -issuer -noout 2>/dev/null | sed 's/^.*CN=//' | sed 's/\/.*$//' ) "
[ [ -n $hpkp_name ] ] || hpkp_name = $( $OPENSSL x509 -in " $cert_fname " -subject -noout 2>/dev/null | sed 's/^subject= //' )
2016-10-06 18:53:25 +02:00
echo " $hpkp_spki_ca $hpkp_name " >> " $TEMPDIR /intermediate.hashes "
2016-07-05 18:10:36 +02:00
done
fi
2016-07-05 23:33:20 +02:00
2016-10-06 18:53:25 +02:00
# This is where the matching magic starts, first host certificate, intermediate, then root out of the stores
2016-09-28 20:32:01 +02:00
spki_match = false
has_backup_spki = false
i = 0
2016-10-06 18:53:25 +02:00
for hpkp_spki in $spki ; do
certificate_found = false
# compare collected SPKIs against the host certificate
if [ [ " $hpkp_spki_hostcert " = = " $hpkp_spki " ] ] || [ [ " $hpkp_spki_hostcert " = = " $hpkp_spki = " ] ] ; then
certificate_found = true # We have a match
2016-09-28 20:32:01 +02:00
spki_match = true
2016-10-06 18:53:25 +02:00
out " \n $spaces_indented Host cert: "
pr_done_good " $hpkp_spki "
fileout " hpkp_ $hpkp_spki " "OK" " SPKI $hpkp_spki matches the host certificate "
2015-09-17 15:30:15 +02:00
fi
2017-02-25 16:31:30 +01:00
debugme tm_out " \n $hpkp_spki | $hpkp_spki_hostcert "
2016-07-04 17:21:24 +02:00
2016-07-05 18:10:36 +02:00
# Check for intermediate match
2016-10-06 18:53:25 +02:00
if ! " $certificate_found " ; then
hpkp_matches = $( grep " $hpkp_spki " $TEMPDIR /intermediate.hashes 2>/dev/null)
if [ [ -n $hpkp_matches ] ] ; then # hpkp_matches + hpkp_spki + '='
2016-07-25 10:57:10 +02:00
# We have a match
2016-10-06 18:53:25 +02:00
certificate_found = true
2016-09-28 20:32:01 +02:00
spki_match = true
2016-10-06 18:53:25 +02:00
out " \n $spaces_indented Sub CA: "
pr_done_good " $hpkp_spki "
ca_cn = " $( sed "s/^[a-zA-Z0-9\+\/]*=* *//" <<< $" $hpkp_matches " ) "
pr_italic " $ca_cn "
fileout " hpkp_ $hpkp_spki " "OK" " SPKI $hpkp_spki matches Intermediate CA \" $ca_cn \" pinned in the HPKP header "
2016-07-05 18:10:36 +02:00
fi
2016-07-04 17:21:24 +02:00
fi
2016-10-06 18:53:25 +02:00
# we compare now against a precompiled list of SPKIs against the ROOT CAs we have in $ca_hashes
if ! " $certificate_found " ; then
2016-11-04 08:35:27 +01:00
hpkp_matches = $( grep -h " $hpkp_spki " $ca_hashes 2>/dev/null | sort -u)
2016-07-04 17:21:24 +02:00
if [ [ -n $hpkp_matches ] ] ; then
2016-10-06 18:53:25 +02:00
certificate_found = true # root CA found
2016-09-28 20:32:01 +02:00
spki_match = true
2016-07-05 23:33:20 +02:00
if [ [ $( count_lines " $hpkp_matches " ) -eq 1 ] ] ; then
2016-11-04 02:54:56 +01:00
# replace by awk
2016-09-28 20:32:01 +02:00
match_ca = $( sed "s/[a-zA-Z0-9\+\/]*=* *//" <<< " $hpkp_matches " )
2016-07-05 23:33:20 +02:00
else
match_ca = ""
2016-10-06 18:53:25 +02:00
2016-07-05 23:33:20 +02:00
fi
2016-10-06 18:53:25 +02:00
ca_cn = " $( sed "s/^[a-zA-Z0-9\+\/]*=* *//" <<< $" $hpkp_matches " ) "
if [ [ " $match_ca " = = " $hpkp_ca " ] ] ; then # part of the chain
out " \n $spaces_indented Root CA: "
pr_done_good " $hpkp_spki "
pr_italic " $ca_cn "
fileout " hpkp_ $hpkp_spki " "INFO" " SPKI $hpkp_spki matches Root CA \" $ca_cn \" pinned in the HPKP header. (Root CA part of the chain) "
else # not part of chain
match_ca = ""
has_backup_spki = true # Root CA outside the chain --> we save it for unmatched
fileout " hpkp_ $hpkp_spki " "INFO" " SPKI $hpkp_spki matches Root CA \" $ca_cn \" pinned in the HPKP header. (Root backup SPKI) "
backup_spki[ i] = " $( strip_lf " $hpkp_spki " ) " # save it for later
backup_spki_str[ i] = " $ca_cn " # also the name=CN of the root CA
i = $(( i + 1 ))
2016-07-05 23:33:20 +02:00
fi
2016-07-04 17:21:24 +02:00
fi
2016-07-05 18:10:36 +02:00
fi
2016-10-06 18:53:25 +02:00
# still no success --> it's probably a backup SPKI
if ! " $certificate_found " ; then
2016-09-28 20:32:01 +02:00
# Most likely a backup SPKI, unfortunately we can't tell for what it is: host, intermediates
has_backup_spki = true
2016-10-06 18:53:25 +02:00
backup_spki[ i] = " $( strip_lf " $hpkp_spki " ) " # save it for later
2016-11-04 02:54:56 +01:00
backup_spki_str[ i] = "" # no root ca
2016-09-28 20:32:01 +02:00
i = $(( i + 1 ))
2016-10-06 18:53:25 +02:00
fileout " hpkp_ $hpkp_spki " "INFO" " SPKI $hpkp_spki doesn't match anything. This is ok for a backup for any certificate "
# CSV/JSON output here for the sake of simplicity, rest we do en bloc below
2016-07-05 18:10:36 +02:00
fi
2016-11-04 02:54:56 +01:00
done
2016-07-04 17:21:24 +02:00
2016-10-06 18:53:25 +02:00
# now print every backup spki out we saved before
out " \n $spaces_indented Backups: "
# for i=0 manually do the same as below as there's other indentation here
if [ [ -n " ${ backup_spki_str [0] } " ] ] ; then
pr_done_good " ${ backup_spki [0] } "
#out " Root CA: "
2017-04-06 16:54:20 +02:00
prln_italic " ${ backup_spki_str [0] } "
2016-09-28 20:32:01 +02:00
else
2016-10-06 18:53:25 +02:00
outln " ${ backup_spki [0] } "
2016-09-28 20:32:01 +02:00
fi
2016-10-06 18:53:25 +02:00
# now for i=1
for ( ( i = 1; i < ${# backup_spki [@] } ; i++ ) ) ; do
if [ [ -n " ${ backup_spki_str [i] } " ] ] ; then
# it's a Root CA outside the chain
pr_done_good " $spaces_indented ${ backup_spki [i] } "
#out " Root CA: "
2017-04-06 16:54:20 +02:00
prln_italic " ${ backup_spki_str [i] } "
2016-10-06 18:53:25 +02:00
else
outln " $spaces_indented ${ backup_spki [i] } "
2015-09-17 15:30:15 +02:00
fi
2016-10-06 18:53:25 +02:00
done
2016-11-04 08:35:27 +01:00
if [ [ ! -f " $ca_hashes " ] ] && " $spki_match " ; then
out " $spaces "
2017-02-25 16:31:30 +01:00
prln_warning " Attribution of further hashes couldn't be done as $ca_hashes could not be found "
2016-11-04 08:35:27 +01:00
fileout "hpkp_spkimatch" "WARN" " Attribution of further hashes couldn't be done as $ca_hashes could not be found "
fi
2016-09-28 20:32:01 +02:00
2016-07-04 17:21:24 +02:00
# If all else fails...
2016-10-06 18:53:25 +02:00
if ! " $spki_match " ; then
2016-09-28 20:32:01 +02:00
" $has_backup_spki " && out " $spaces " # we had a few lines with backup SPKIs already
2017-02-25 16:31:30 +01:00
prln_svrty_high " No matching key for SPKI found "
2016-10-06 18:53:25 +02:00
fileout "hpkp_spkimatch" "HIGH" "None of the SPKI match your host certificate, intermediate CA or known root CAs. You may have bricked this site"
2015-09-17 15:30:15 +02:00
fi
2016-07-25 11:02:05 +02:00
2016-10-06 18:53:25 +02:00
if ! " $has_backup_spki " ; then
2017-02-25 16:31:30 +01:00
prln_svrty_high " No backup keys found. Loss/compromise of the currently pinned key(s) will lead to bricked site. "
2016-09-28 20:32:01 +02:00
fileout "hpkp_backup" "HIGH" "No backup keys found. Loss/compromise of the currently pinned key(s) will lead to bricked site."
2015-09-17 15:30:15 +02:00
fi
else
2016-09-28 20:32:01 +02:00
outln "--"
2016-03-12 17:08:43 +01:00
fileout "hpkp" "INFO" "No support for HTTP Public Key Pinning"
2015-09-17 15:30:15 +02:00
fi
tmpfile_handle $FUNCNAME .txt
return $?
2015-05-17 22:43:53 +02:00
}
emphasize_stuff_in_headers( ) {
2017-02-27 17:17:19 +01:00
local html_brown = "<span style=\\\"color:olive;\\\">"
local html_yellow = "<span style=\\\"color:olive;font-weight:bold;\\\">"
local html_off = "<\\/span>"
# see http://www.grymoire.com/Unix/Sed.html#uh-3
# outln "$1" | sed "s/[0-9]*/$brown&${off}/g"
tmln_out " $1 " | sed -e " s/\([0-9]\)/ ${ brown } \1 ${ off } /g " \
2017-06-22 13:39:37 +02:00
-e " s/Unix/ ${ yellow } Unix ${ off } /g " \
-e " s/Debian/ ${ yellow } Debian ${ off } /g " \
-e " s/Win32/ ${ yellow } Win32 ${ off } /g " \
-e " s/Win64/ ${ yellow } Win64 ${ off } /g " \
2017-02-27 17:17:19 +01:00
-e " s/Ubuntu/ ${ yellow } Ubuntu ${ off } /g " \
-e " s/ubuntu/ ${ yellow } ubuntu ${ off } /g " \
2017-06-20 11:31:22 +02:00
-e " s/stretch/ ${ yellow } stretch ${ off } /g " \
2017-02-27 17:17:19 +01:00
-e " s/jessie/ ${ yellow } jessie ${ off } /g " \
-e " s/squeeze/ ${ yellow } squeeze ${ off } /g " \
-e " s/wheezy/ ${ yellow } wheezy ${ off } /g " \
-e " s/lenny/ ${ yellow } lenny ${ off } /g " \
-e " s/SUSE/ ${ yellow } SUSE ${ off } /g " \
-e " s/Red Hat Enterprise Linux/ ${ yellow } Red Hat Enterprise Linux ${ off } /g " \
-e " s/Red Hat/ ${ yellow } Red Hat ${ off } /g " \
-e " s/CentOS/ ${ yellow } CentOS ${ off } /g " \
-e " s/Via/ ${ yellow } Via ${ off } /g " \
-e " s/X-Forwarded/ ${ yellow } X-Forwarded ${ off } /g " \
-e " s/Liferay-Portal/ ${ yellow } Liferay-Portal ${ off } /g " \
-e " s/X-Cache-Lookup/ ${ yellow } X-Cache-Lookup ${ off } /g " \
-e " s/X-Cache/ ${ yellow } X-Cache ${ off } /g " \
-e " s/X-Squid/ ${ yellow } X-Squid ${ off } /g " \
-e " s/X-Server/ ${ yellow } X-Server ${ off } /g " \
-e " s/X-Varnish/ ${ yellow } X-Varnish ${ off } /g " \
-e " s/X-OWA-Version/ ${ yellow } X-OWA-Version ${ off } /g " \
-e " s/MicrosoftSharePointTeamServices/ ${ yellow } MicrosoftSharePointTeamServices ${ off } /g " \
2017-02-27 19:16:29 +01:00
-e " s/X-Application-Context/ ${ yellow } X-Application-Context ${ off } /g " \
2017-02-27 17:17:19 +01:00
-e " s/X-Version/ ${ yellow } X-Version ${ off } /g " \
-e " s/X-Powered-By/ ${ yellow } X-Powered-By ${ off } /g " \
-e " s/X-UA-Compatible/ ${ yellow } X-UA-Compatible ${ off } /g " \
2017-06-20 11:31:22 +02:00
-e " s/Link/ ${ yellow } Link ${ off } /g " \
-e " s/X-Rack-Cache/ ${ yellow } X-Rack-Cache ${ off } /g " \
-e " s/X-Runtime/ ${ yellow } X-Runtime ${ off } /g " \
-e " s/X-Pingback/ ${ yellow } X-Pingback ${ off } /g " \
2017-02-27 17:17:19 +01:00
-e " s/X-AspNet-Version/ ${ yellow } X-AspNet-Version ${ off } /g "
if " $do_html " ; then
2017-04-13 22:06:06 +02:00
if [ [ $COLOR -eq 2 ] ] ; then
html_out " $( tm_out " $1 " | sed -e 's/\&/\&/g' \
-e 's/</\</g' -e 's/>/\>/g' -e 's/"/\"/g' -e "s/'/\'/g" \
-e " s/\([0-9]\)/ ${ html_brown } \1 ${ html_off } /g " \
2017-06-22 13:39:37 +02:00
-e " s/Unix/ ${ html_yellow } Unix ${ html_off } /g " \
-e " s/Debian/ ${ html_yellow } Debian ${ html_off } /g " \
-e " s/Win32/ ${ html_yellow } Win32 ${ html_off } /g " \
-e " s/Win64/ ${ html_yellow } Win64 ${ html_off } /g " \
2017-04-13 22:06:06 +02:00
-e " s/Ubuntu/ ${ html_yellow } Ubuntu ${ html_off } /g " \
-e " s/ubuntu/ ${ html_yellow } ubuntu ${ html_off } /g " \
2017-07-22 20:57:32 +02:00
-e " s/stretch/ ${ html_yellow } stretch ${ html_off } /g " \
2017-04-13 22:06:06 +02:00
-e " s/jessie/ ${ html_yellow } jessie ${ html_off } /g " \
-e " s/squeeze/ ${ html_yellow } squeeze ${ html_off } /g " \
-e " s/wheezy/ ${ html_yellow } wheezy ${ html_off } /g " \
-e " s/lenny/ ${ html_yellow } lenny ${ html_off } /g " \
-e " s/SUSE/ ${ html_yellow } SUSE ${ html_off } /g " \
-e " s/Red Hat Enterprise Linux/ ${ html_yellow } Red Hat Enterprise Linux ${ html_off } /g " \
-e " s/Red Hat/ ${ html_yellow } Red Hat ${ html_off } /g " \
-e " s/CentOS/ ${ html_yellow } CentOS ${ html_off } /g " \
-e " s/Via/ ${ html_yellow } Via ${ html_off } /g " \
-e " s/X-Forwarded/ ${ html_yellow } X-Forwarded ${ html_off } /g " \
-e " s/Liferay-Portal/ ${ html_yellow } Liferay-Portal ${ html_off } /g " \
-e " s/X-Cache-Lookup/ ${ html_yellow } X-Cache-Lookup ${ html_off } /g " \
-e " s/X-Cache/ ${ html_yellow } X-Cache ${ html_off } /g " \
-e " s/X-Squid/ ${ html_yellow } X-Squid ${ html_off } /g " \
-e " s/X-Server/ ${ html_yellow } X-Server ${ html_off } /g " \
-e " s/X-Varnish/ ${ html_yellow } X-Varnish ${ html_off } /g " \
-e " s/X-OWA-Version/ ${ html_yellow } X-OWA-Version ${ html_off } /g " \
-e " s/MicrosoftSharePointTeamServices/ ${ html_yellow } MicrosoftSharePointTeamServices ${ html_off } /g " \
-e " s/X-Application-Context/ ${ html_yellow } X-Application-Context ${ html_off } /g " \
-e " s/X-Version/ ${ html_yellow } X-Version ${ html_off } /g " \
-e " s/X-Powered-By/ ${ html_yellow } X-Powered-By ${ html_off } /g " \
-e " s/X-UA-Compatible/ ${ html_yellow } X-UA-Compatible ${ html_off } /g " \
2017-06-20 11:31:22 +02:00
-e " s/Link/ ${ html_yellow } Link ${ html_off } /g " \
-e " s/X-Runtime/ ${ html_yellow } X-Runtime ${ html_off } /g " \
-e " s/X-Rack-Cache/ ${ html_yellow } X-Rack-Cache ${ html_off } /g " \
-e " s/X-Pingback/ ${ html_yellow } X-Pingback ${ html_off } /g " \
2017-04-13 22:06:06 +02:00
-e " s/X-AspNet-Version/ ${ html_yellow } X-AspNet-Version ${ html_off } /g " ) "
else
html_out " $( html_reserved " $1 " ) "
fi
2017-02-27 17:17:19 +01:00
html_out "\n"
2017-02-10 23:08:49 +01:00
fi
2015-05-17 22:43:53 +02:00
}
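# Example of the effect (illustrative): for a banner like "Server: Apache/2.4.25 (Debian)" the sed chain
# above wraps every digit in ${brown}...${off} and the word "Debian" in ${yellow}...${off}; with --html
# and COLOR=2 the same substitutions are emitted as <span style="color:olive..."> markup instead.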
2015-07-22 13:11:20 +02:00
run_server_banner() {
     local serverbanner

     if [[ ! -s $HEADERFILE ]]; then
          run_http_header "$1" || return 3
     fi
     pr_bold " Server banner              "
     grep -ai '^Server' $HEADERFILE >$TMPFILE
     if [[ $? -eq 0 ]]; then
          serverbanner=$(sed -e 's/^Server: //' -e 's/^server: //' $TMPFILE)
          if [[ x"$serverbanner" == "x\n" ]] || [[ x"$serverbanner" == "x\n\r" ]] || [[ -z "$serverbanner" ]]; then
               outln "banner exists but empty string"
               fileout "serverbanner" "INFO" "Server banner exists but empty string"
          else
               emphasize_stuff_in_headers "$serverbanner"
               fileout "serverbanner" "INFO" "Server banner identified: $serverbanner"
               if [[ "$serverbanner" = *Microsoft-IIS/6.* ]] && [[ $OSSL_VER == 1.0.2* ]]; then
                    prln_warning " It's recommended to run another test w/ OpenSSL 1.0.1 !"
                    # see https://github.com/PeterMosmans/openssl/issues/19#issuecomment-100897892
                    fileout "IIS6_openssl_mismatch" "WARN" "It is recommended to rerun this test w/ OpenSSL 1.0.1. See https://github.com/PeterMosmans/openssl/issues/19#issuecomment-100897892"
               fi
          fi
          # mozilla.github.io/server-side-tls/ssl-config-generator/
          # https://support.microsoft.com/en-us/kb/245030
     else
          outln "(no \"Server\" line in header, interesting!)"
          fileout "serverbanner" "INFO" "No Server banner in header, interesting!"
     fi

     tmpfile_handle $FUNCNAME.txt
     return 0
}
2015-07-22 13:11:20 +02:00
run_rp_banner() {
     local line
     local first=true
     local spaces="                              "
     local rp_banners=""

     if [[ ! -s $HEADERFILE ]]; then
          run_http_header "$1" || return 3
     fi
     pr_bold " Reverse Proxy banner       "
     egrep -ai '^Via:|^X-Cache|^X-Squid|^X-Varnish:|^X-Server-Name:|^X-Server-Port:|^x-forwarded|^Forwarded' $HEADERFILE >$TMPFILE
     if [[ $? -ne 0 ]]; then
          outln "--"
          fileout "rp_header" "INFO" "No reverse proxy banner found"
     else
          while read line; do
               line=$(strip_lf "$line")
               if ! $first; then
                    out "$spaces"
               else
                    first=false
               fi
               emphasize_stuff_in_headers "$line"
               rp_banners="${rp_banners}${line}"
          done <$TMPFILE
          fileout "rp_header" "INFO" "Reverse proxy banner(s) found: $rp_banners"
     fi
     outln

     tmpfile_handle $FUNCNAME.txt
     return 0
     # emphasize_stuff_in_headers "$(sed 's/^/ /g' $TMPFILE | tr '\n\r' ' ')" || \
}
2015-07-22 13:11:20 +02:00
run_application_banner() {
     local line
     local first=true
     local spaces="                              "
     local app_banners=""

     if [[ ! -s $HEADERFILE ]]; then
          run_http_header "$1" || return 3
     fi
     pr_bold " Application banner         "
     egrep -ai '^X-Powered-By|^X-AspNet-Version|^X-Version|^Liferay-Portal|^X-OWA-Version|^MicrosoftSharePointTeamServices' $HEADERFILE >$TMPFILE
     if [[ $? -ne 0 ]]; then
          outln "--"
          fileout "app_banner" "INFO" "No Application Banners found"
     else
          while IFS='' read -r line; do
               line=$(strip_lf "$line")
               if ! $first; then
                    out "$spaces"
               else
                    first=false
               fi
               emphasize_stuff_in_headers "$line"
               app_banners="${app_banners}${line}"
          done < "$TMPFILE"
          fileout "app_banner" "INFO" "Application Banners found: $app_banners"
     fi
     tmpfile_handle $FUNCNAME.txt
     return 0
}
2017-09-21 10:19:47 +02:00
# arg1: multiline string w cookies
# arg2: indentation, i.e. string w/ spaces
f5_bigip_check() {
     local allcookies="$1"
     local ip port cookievalue cookiename
     local routed_domain offset
     local savedcookies=""
     local spaces="$2"

     # taken from https://github.com/drwetter/F5-BIGIP-Decoder, more details see there
     debugme echo -e "all cookies: >>$allcookies<<\n"
     while true; do IFS='=' read cookiename cookievalue
          [[ -z "$cookievalue" ]] && break
          cookievalue=${cookievalue/;/}
          debugme echo $cookiename : $cookievalue
          if grep -q -E '[0-9]{9,10}\.[0-9]{3,5}\.0000' <<< "$cookievalue"; then
               ip="$(f5_ip_oldstyle "$cookievalue")"
               port="$(f5_port_decode $cookievalue)"
               out "${spaces}F5 cookie (default IPv4 pool member): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}"
               fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is default IPv4 pool member ${ip}:${port}"
          elif grep -q -E '^rd[0-9]{1,2}o0{20}f{4}[a-f0-9]{8}o[0-9]{1,5}' <<< "$cookievalue"; then
               routed_domain="$(f5_determine_routeddomain "$cookievalue")"
               offset=$(( 2 + ${#routed_domain} + 1 + 24 ))
               port="${cookievalue##*o}"
               ip="$(f5_hex2ip "${cookievalue:$offset:8}")"
               out "${spaces}F5 cookie (IPv4 pool in routed domain "; pr_svrty_medium "$routed_domain"; out "): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}"
               fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is IPv4 pool member in routed domain $routed_domain ${ip}:${port}"
          elif grep -q -E '^vi[a-f0-9]{32}\.[0-9]{1,5}' <<< "$cookievalue"; then
               ip="$(f5_hex2ip6 ${cookievalue:2:32})"
               port="${cookievalue##*.}"
               port=$(f5_port_decode "$port")
               out "${spaces}F5 cookie (default IPv6 pool member): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}"
               fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is default IPv6 pool member ${ip}:${port}"
          elif grep -q -E '^rd[0-9]{1,2}o[a-f0-9]{32}o[0-9]{1,5}' <<< "$cookievalue"; then
               routed_domain="$(f5_determine_routeddomain "$cookievalue")"
               offset=$(( 2 + ${#routed_domain} + 1 ))
               port="${cookievalue##*o}"
               ip="$(f5_hex2ip6 ${cookievalue:$offset:32})"
               out "${spaces}F5 cookie (IPv6 pool in routed domain "; pr_svrty_medium "$routed_domain"; out "): "; pr_italic "$cookiename "; prln_svrty_medium "${ip}:${port}"
               fileout "cookie_bigip_f5" "MEDIUM" "Information leakage: F5 cookie $cookiename $cookievalue is IPv6 pool member in routed domain $routed_domain ${ip}:${port}"
          elif grep -q -E '^\!.*=$' <<< "$cookievalue"; then
               if [[ "${#cookievalue}" -eq 81 ]]; then
                    savedcookies="${savedcookies} ${cookiename}=${cookievalue:1:79}"
                    out "${spaces}Encrypted F5 cookie named "; pr_italic "${cookiename}"; outln " detected"
                    fileout "cookie_bigip_f5" "INFO" "encrypted F5 cookie named ${cookiename} detected"
               fi
          fi
     done <<< "$allcookies"
}
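# Decoding example for the first branch above (the well-known F5 persistence encoding, shown for
# illustration): a cookie value "1677787402.36895.0000" encodes the IP little-endian and the port
# byte-swapped, i.e. 1677787402 -> 10.1.1.100 and 36895 (0x901F -> 0x1F90) -> 8080, hence "10.1.1.100:8080".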
2016-10-01 22:25:14 +02:00
run_cookie_flags( ) { # ARG1: Path
2015-09-17 15:30:15 +02:00
local -i nr_cookies
2017-02-24 16:22:59 +01:00
local -i nr_httponly nr_secure
2016-01-23 19:18:33 +01:00
local negative_word
2016-09-21 20:32:04 +02:00
local msg302 = "" msg302_ = ""
2017-09-25 19:51:10 +02:00
local spaces = " "
2015-09-17 15:30:15 +02:00
if [ [ ! -s $HEADERFILE ] ] ; then
run_http_header " $1 " || return 3
fi
2016-09-21 20:32:04 +02:00
if ! grep -q 20 <<< " $HTTP_STATUS_CODE " ; then
if egrep -q "301|302" <<< " $HTTP_STATUS_CODE " ; then
msg302 = " -- maybe better try target URL of 30x"
msg302_ = " (30x detected, better try target URL of 30x)"
else
msg302 = " -- HTTP status $HTTP_STATUS_CODE signals you maybe missed the web application "
msg302_ = " (maybe missed the application)"
fi
fi
2015-09-17 15:30:15 +02:00
pr_bold " Cookie(s) "
grep -ai '^Set-Cookie' $HEADERFILE >$TMPFILE
2017-09-21 10:19:47 +02:00
if [ [ $? -ne 0 ] ] ; then
outln " (none issued at \" $1 \") $msg302 "
fileout "cookie_count" "INFO" " No cookies issued at \" $1 \" $msg302_ "
else
2017-07-06 13:02:27 +02:00
nr_cookies = $( count_lines " $( cat $TMPFILE ) " )
2016-10-03 21:17:29 +02:00
out " $nr_cookies issued: "
2016-09-21 20:32:04 +02:00
fileout "cookie_count" "INFO" " $nr_cookies cookie(s) issued at \" $1 \" $msg302_ "
2015-09-17 15:30:15 +02:00
if [ [ $nr_cookies -gt 1 ] ] ; then
negative_word = "NONE"
else
negative_word = "NOT"
fi
nr_secure = $( grep -iac secure $TMPFILE )
case $nr_secure in
2016-03-01 20:42:34 +01:00
0) pr_svrty_medium " $negative_word " ; ;
2016-03-01 20:36:41 +01:00
[ 123456789] ) pr_done_good " $nr_secure / $nr_cookies " ; ;
2015-09-17 15:30:15 +02:00
esac
out " secure, "
2017-02-24 16:22:59 +01:00
if [ [ $nr_cookies -eq $nr_secure ] ] ; then
2016-01-23 23:33:17 +01:00
fileout "cookie_secure" "OK" " All $nr_cookies cookie(s) issued at \" $1 \" marked as secure "
2016-01-23 19:18:33 +01:00
else
2017-08-28 18:25:45 +02:00
fileout "cookie_secure" "INFO" " $nr_secure / $nr_cookies cookie(s) issued at \" $1 \" marked as secure "
2016-01-23 19:18:33 +01:00
fi
2015-09-17 15:30:15 +02:00
nr_httponly = $( grep -cai httponly $TMPFILE )
case $nr_httponly in
2016-03-01 20:42:34 +01:00
0) pr_svrty_medium " $negative_word " ; ;
2016-03-01 20:36:41 +01:00
[ 123456789] ) pr_done_good " $nr_httponly / $nr_cookies " ; ;
2015-09-17 15:30:15 +02:00
esac
out " HttpOnly"
2017-02-24 16:22:59 +01:00
if [ [ $nr_cookies -eq $nr_httponly ] ] ; then
2016-09-21 20:32:04 +02:00
fileout "cookie_httponly" "OK" " All $nr_cookies cookie(s) issued at \" $1 \" marked as HttpOnly $msg302_ "
2016-01-23 19:18:33 +01:00
else
2017-08-28 18:25:45 +02:00
fileout "cookie_httponly" "INFO" " $nr_secure / $nr_cookies cookie(s) issued at \" $1 \" marked as HttpOnly $msg302_ "
2016-01-23 19:18:33 +01:00
fi
2017-09-21 10:19:47 +02:00
outln " $msg302 "
allcookies = " $( awk '/[Ss][Ee][Tt]-[Cc][Oo][Oo][Kk][Ii][Ee]:/ { print $2 }' " $TMPFILE " ) "
2017-09-25 19:51:10 +02:00
f5_bigip_check " $allcookies " " $spaces "
2016-09-21 20:32:04 +02:00
fi
2015-09-17 15:30:15 +02:00
tmpfile_handle $FUNCNAME .txt
return 0
2015-05-17 22:43:53 +02:00
}
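# Illustrative cookie lines and how they are counted above (hypothetical values):
#    Set-Cookie: SESSIONID=abc123; Path=/; Secure; HttpOnly   -> counts for both nr_secure and nr_httponly
#    Set-Cookie: tracking=xyz; Path=/                         -> counts for neither, so output reads "1/2 secure, 1/2 HttpOnly"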
2015-07-22 13:11:20 +02:00
run_more_flags( ) {
2017-10-30 16:48:48 +01:00
     local good_flags2test="X-Frame-Options X-XSS-Protection X-Content-Type-Options Content-Security-Policy X-Content-Security-Policy X-WebKit-CSP Content-Security-Policy-Report-Only Expect-CT"
     local other_flags2test="Access-Control-Allow-Origin Upgrade X-Served-By X-UA-Compatible Referrer-Policy"
2017-04-06 16:37:45 +02:00
local f2t line
2015-09-17 15:30:15 +02:00
local first = true
local spaces = " "
if [ [ ! -s $HEADERFILE ] ] ; then
run_http_header " $1 " || return 3
fi
2016-10-03 18:52:48 +02:00
2015-09-17 15:30:15 +02:00
pr_bold " Security headers "
2016-10-03 18:52:48 +02:00
for f2t in $good_flags2test ; do
2017-07-26 22:37:50 +02:00
[ [ " $DEBUG " -ge 5 ] ] && echo " testing \" $f2t \" "
2017-06-07 09:54:24 +02:00
detect_header " $f2t " " $f2t " " $spaces "
2016-10-03 18:52:48 +02:00
if [ [ $? -ge 1 ] ] ; then
2016-01-23 19:18:33 +01:00
if ! " $first " ; then
2017-04-05 14:42:55 +02:00
out " $spaces " # output leading spaces if the first header
2015-09-17 15:30:15 +02:00
else
first = false
fi
2017-04-05 14:42:55 +02:00
pr_done_good " $f2t "
2017-04-06 16:37:45 +02:00
line = " $( out_row_aligned_max_width " $f2t $HEADERVALUE " " $spaces " $TERM_WIDTH ) "
outln " ${ line #* } "
2016-10-03 18:52:48 +02:00
fileout " $f2t " "OK" " $f2t : $HEADERVALUE "
fi
done
for f2t in $other_flags2test ; do
2017-07-26 22:37:50 +02:00
[ [ " $DEBUG " -ge 5 ] ] && echo " testing \" $f2t \" "
2017-06-07 09:54:24 +02:00
detect_header " $f2t " " $f2t " " $spaces "
2016-10-03 18:52:48 +02:00
if [ [ $? -ge 1 ] ] ; then
if ! " $first " ; then
2017-04-05 14:42:55 +02:00
out " $spaces " # output leading spaces if the first header
2015-09-17 15:30:15 +02:00
else
first = false
fi
2017-04-05 14:42:55 +02:00
pr_litecyan " $f2t "
outln " $HEADERVALUE " # shouldn't be that long
2017-08-28 18:25:45 +02:00
fileout " $f2t " "INFO" " $f2t : $HEADERVALUE "
2016-10-03 18:52:48 +02:00
fi
2016-11-04 02:54:56 +01:00
done
2016-10-03 18:52:48 +02:00
#TODO: I am not testing for the correctness or anything stupid yet, e.g. "X-Frame-Options: allowall" or Access-Control-Allow-Origin: *
if " $first " ; then
2017-02-25 16:31:30 +01:00
prln_svrty_medium "--"
2016-10-03 18:52:48 +02:00
fileout "sec_headers" "MEDIUM" "No security (or other interesting) headers detected"
ret = 1
else
ret = 0
2015-09-17 15:30:15 +02:00
fi
2015-05-17 22:43:53 +02:00
2015-09-17 15:30:15 +02:00
tmpfile_handle $FUNCNAME .txt
return $ret
2015-05-17 22:43:53 +02:00
}
2017-03-15 15:32:29 +01:00
# arg1: string w/ 2 openssl hex codes, output is the same cipher in NSS/ssllabs terminology
normalize_ciphercode() {
     if [[ "${1:2:2}" == "00" ]]; then
          tm_out "$(tolower "x${1:7:2}")"
     else
          tm_out "$(tolower "x${1:2:2}${1:7:2}${1:12:2}")"
     fi
     return 0
}
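# Examples (derived from the substring offsets above), for an openssl "-V" style code:
#    "0xC0,0x30"      -> "xc030"     (leading byte != 00)
#    "0x00,0x9C"      -> "x9c"       (leading 00 byte is dropped)
#    "0x01,0x00,0x80" -> "x010080"   (three-byte SSLv2 code)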
prettyprint_local( ) {
2017-02-07 20:25:41 +01:00
local arg line
2017-03-15 15:32:29 +01:00
local hexc hexcode dash ciph sslvers kx auth enc mac export
2015-09-17 15:30:15 +02:00
local re = '^[0-9A-Fa-f]+$'
2016-02-07 19:13:59 +01:00
if [ [ " $1 " = = 0x* ] ] || [ [ " $1 " = = 0X* ] ] ; then
fatal "pls supply x<number> instead" 2
fi
2016-05-26 12:56:55 +02:00
if [ [ -z " $1 " ] ] ; then
pr_headline " Displaying all $OPENSSL_NR_CIPHERS local ciphers " ;
else
pr_headline " Displaying all local ciphers " ;
2016-02-07 19:13:59 +01:00
# pattern provided; which one?
2015-09-17 15:30:15 +02:00
          [[ $1 =~ $re ]] && \
               pr_headline "matching number pattern \"$1\" " || \
               pr_headline "matching word pattern "\"$1\"" (ignore case) "
2015-09-17 15:30:15 +02:00
fi
outln "\n"
neat_header
if [ [ -z " $1 " ] ] ; then
$OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>$ERRFILE | while read hexcode dash ciph sslvers kx auth enc mac export ; do # -V doesn't work with openssl < 1.0
2017-03-15 15:32:29 +01:00
hexc = " $( normalize_ciphercode $hexcode ) "
outln " $( neat_list " $hexc " " $ciph " " $kx " " $enc " ) "
2015-09-17 15:30:15 +02:00
done
else
#for arg in $(echo $@ | sed 's/,/ /g'); do
for arg in ${ *//,/ / } ; do
$OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>$ERRFILE | while read hexcode dash ciph sslvers kx auth enc mac export ; do # -V doesn't work with openssl < 1.0
2017-03-15 15:32:29 +01:00
hexc = " $( normalize_ciphercode $hexcode ) "
2015-09-17 15:30:15 +02:00
# for numbers we don't do word matching:
[ [ $arg = ~ $re ] ] && \
2017-03-15 15:32:29 +01:00
line = " $( neat_list " $hexc " " $ciph " " $kx " " $enc " | grep -ai " $arg " ) " || \
line = " $( neat_list " $hexc " " $ciph " " $kx " " $enc " | grep -wai " $arg " ) "
[ [ -n " $line " ] ] && outln " $line "
2015-09-17 15:30:15 +02:00
done
done
fi
outln
return 0
2015-05-17 22:43:53 +02:00
}
# list ciphers (and makes sure you have them locally configured)
# arg[1]: cipher list (or anything else)
# arg[2]: protocol (e.g., -ssl2)
2015-05-17 22:43:53 +02:00
listciphers() {
     local -i ret
     local debugname="$(sed -e s'/\!/not/g' -e 's/\:/_/g' <<< "$1")"

     $OPENSSL ciphers $2 "$1" &>$TMPFILE
     ret=$?
     debugme cat $TMPFILE

     tmpfile_handle $FUNCNAME.$debugname.txt
     return $ret
}
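# Illustrative call: listciphers "3DES:RC4" -tls1 returns 0 only if the local $OPENSSL build knows at
# least one cipher matching that list ("openssl ciphers" exits non-zero otherwise); the expansion
# ends up in $TMPFILE for debugging.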
# argv[1]: cipher list to test in OpenSSL syntax
2017-04-12 21:00:08 +02:00
# argv[2]: string on console / HTML or "finding"
# argv[3]: rating whether ok to offer
# argv[4]: string to be appended for fileout
# argv[5]: non-SSLv2 cipher list to test (hexcodes), if using sockets
# argv[6]: SSLv2 cipher list to test (hexcodes), if using sockets
2015-05-17 22:43:53 +02:00
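# Hypothetical invocation matching the argv[] description above (cipher list, label, rating, fileout tag,
# TLS hex ciphers, SSLv2 hex ciphers) -- the actual call sites pass the project's own lists:
#    std_cipherlists 'AESGCM:CHACHA20' " Strong encryption (AEAD ciphers) " 2 "STRONG" "13,01, 13,02, c0,2c, c0,30" ""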
std_cipherlists() {
     local -i i len sclient_success=1
     local cipherlist sslv2_cipherlist detected_ssl2_ciphers
     local singlespaces
     local proto=""
     local debugname="$(sed -e s'/\!/not/g' -e 's/\:/_/g' <<< "$1")"
[ [ " $OPTIMAL_PROTO " = = "-ssl2" ] ] && proto = " $OPTIMAL_PROTO "
2017-04-12 21:00:08 +02:00
pr_bold " $2 " # to be indented equal to server preferences
if [ [ -n " $5 " ] ] || listciphers " $1 " $proto ; then
if [ [ -z " $5 " ] ] || ( " $FAST " && listciphers " $1 " -tls1 ) ; then
2017-10-10 22:00:47 +02:00
for proto in -no_ssl2 -tls1_2 -tls1_1 -tls1 -ssl3; do
if [ [ " $proto " = = "-tls1_2" ] ] ; then
# If $OPENSSL doesn't support TLSv1.3 or if no TLSv1.3
# ciphers are being tested, then a TLSv1.2 ClientHello
# was tested in the first iteration.
2017-10-27 19:07:04 +02:00
! " $HAS_TLS13 " && continue
[ [ ! " $( $OPENSSL ciphers " $1 " 2>/dev/null) " = ~ TLS13 ] ] && continue
2017-10-10 22:00:47 +02:00
fi
! " $HAS_SSL3 " && [ [ " $proto " = = "-ssl3" ] ] && continue
if [ [ " $proto " != "-no_ssl2" ] ] ; then
" $FAST " && continue
[ [ $( has_server_protocol " ${ proto : 1 } " ) -eq 1 ] ] && continue
fi
2017-09-19 23:16:41 +02:00
$OPENSSL s_client $( s_client_options "-cipher " $1 " $BUGS $STARTTLS -connect $NODEIP : $PORT $PROXY $SNI $proto " ) 2>$ERRFILE >$TMPFILE </dev/null
2017-10-10 22:00:47 +02:00
sclient_connect_successful $? $TMPFILE
sclient_success = $?
debugme cat $ERRFILE
[ [ $sclient_success -eq 0 ] ] && break
done
          else
               for proto in 04 03 02 01 00; do
                    # If $cipherlist doesn't contain any TLSv1.3 ciphers, then there is
                    # no reason to try a TLSv1.3 ClientHello.
                    [[ "$proto" == "04" ]] && [[ ! "$5" =~ "13,0" ]] && continue
                    [[ $(has_server_protocol "$proto") -eq 1 ]] && continue
                    cipherlist="$(strip_inconsistent_ciphers "$proto" ", $5")"
                    cipherlist="${cipherlist:2}"
                    if [[ -n "$cipherlist" ]] && [[ "$cipherlist" != "00,ff" ]]; then
                         tls_sockets "$proto" "$cipherlist"
                         sclient_success=$?
                         [[ $sclient_success -eq 2 ]] && sclient_success=0
                         [[ $sclient_success -eq 0 ]] && break
                    fi
               done
          fi
          if [[ $sclient_success -ne 0 ]] && [[ 1 -ne $(has_server_protocol ssl2) ]]; then
if ( [[ -z "$6" ]] || "$FAST" ) && "$HAS_SSL2" && listciphers "$1" -ssl2; then
$OPENSSL s_client -cipher "$1" $BUGS $STARTTLS -connect $NODEIP:$PORT $PROXY -ssl2 2>$ERRFILE >$TMPFILE </dev/null
sclient_connect_successful $? $TMPFILE
sclient_success=$?
debugme cat $ERRFILE
elif [[ -n "$6" ]]; then
sslv2_sockets "$6" "true"
if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
sslv2_cipherlist="$(strip_spaces "${6//,/}")"
len=${#sslv2_cipherlist}
detected_ssl2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
# each SSLv2 cipher spec is 3 bytes, i.e. 6 hex chars once the commas are stripped
for ((i=0; i<len; i=i+6)); do
[[ "$detected_ssl2_ciphers" =~ "x${sslv2_cipherlist:i:6}" ]] && sclient_success=0 && break
done
fi
fi
fi
if [[ $sclient_success -ne 0 ]] && $BAD_SERVER_HELLO_CIPHER; then
# If server failed with a known error, raise it to the user.
if [[ $STARTTLS_PROTOCOL == "mysql" ]]; then
pr_warning "SERVER_ERROR: test inconclusive due to MySQL Community Edition (yaSSL) bug."
fileout "std_$4" "WARN" "SERVER_ERROR: test inconclusive due to MySQL Community Edition (yaSSL) bug."
else
pr_warning "SERVER_ERROR: test inconclusive."
fileout "std_$4" "WARN" "SERVER_ERROR: test inconclusive."
fi
else
# Otherwise the error means the server doesn't support that cipher list.
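# $3 encodes how desirable the tested cipher class is: 2=strong, 1=high, 0=medium, -1=bad, -2=obsolete/broken -- see the branches below.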
case $3 in
2) if [[ $sclient_success -eq 0 ]]; then
# Strong is excellent to offer
pr_done_best "offered (OK)"
fileout "std_$4" "OK" "$2 offered"
else
pr_svrty_medium "not offered"
fileout "std_$4" "MEDIUM" "$2 not offered"
fi
;;
1) if [[ $sclient_success -eq 0 ]]; then
# High is good to offer
pr_done_good "offered (OK)"
fileout "std_$4" "OK" "$2 offered"
else
# FIXME: the rating could be readjusted if we knew the result of STRONG before
pr_svrty_medium "not offered"
fileout "std_$4" "MEDIUM" "$2 not offered"
fi
;;
0) if [[ $sclient_success -eq 0 ]]; then
# medium is not that bad
pr_svrty_medium "offered"
fileout "std_$4" "MEDIUM" "$2 offered - not too bad"
else
out "not offered (OK)"
fileout "std_$4" "OK" "$2 not offered"
fi
;;
-1) if [[ $sclient_success -eq 0 ]]; then
# bad but there is worse
pr_svrty_high "offered (NOT ok)"
fileout "std_$4" "HIGH" "$2 offered - bad"
else
# need a check for -eq 1 here
pr_done_good "not offered (OK)"
fileout "std_$4" "OK" "$2 not offered"
fi
;;
-2) if [[ $sclient_success -eq 0 ]]; then
# the ugly ones
pr_svrty_critical "offered (NOT ok)"
fileout "std_$4" "CRITICAL" "$2 offered - ugly"
else
pr_done_best "not offered (OK)"
fileout "std_$4" "OK" "$2 not offered"
fi
;;
*) # we shouldn't reach this
pr_warning "?: $3 (please report this)"
fileout "std_$4" "WARN" "return condition $3 unclear"
;;
esac
fi
tmpfile_handle $FUNCNAME.$debugname.txt
[[ $DEBUG -ge 1 ]] && tm_out " -- $1"
outln
else
singlespaces=$(sed -e 's/ \+/ /g' -e 's/^ //' -e 's/ $//g' -e 's/  //g' <<< "$2")
if [[ "$OPTIMAL_PROTO" == "-ssl2" ]]; then
prln_local_problem "No $singlespaces for SSLv2 configured in $OPENSSL"
else
prln_local_problem "No $singlespaces configured in $OPENSSL"
fi
fileout "std_$4" "WARN" "Cipher $2 ($1) not supported by local OpenSSL ($OPENSSL)"
fi
}
# sockets inspired by http://blog.chris007.de/?p=238
# ARG1: hexbyte with a leading comma (!!), separated by commas
# ARG2: sleep
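# Illustrative call (example values only): socksend ",x16, x03, x01 ,x00, x05" 2
# strips blanks and trailing comments, turns the commas into backslashes and printf's the five
# bytes \x16\x03\x01\x00\x05 to fd 5, then sleeps for 2 seconds.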
socksend() {
# the following works under BSD and Linux, which is quite tricky. So don't mess with it unless you're really sure of what you're doing
if "$HAS_SED_E"; then
data=$(sed -e 's/# .*$//g' -e 's/ //g' <<< "$1" | sed -E 's/^[[:space:]]+//; s/[[:space:]]+$//; /^$/d' | sed 's/,/\\/g' | tr -d '\n')
else
data=$(sed -e 's/# .*$//g' -e 's/ //g' <<< "$1" | sed -r 's/^[[:space:]]+//; s/[[:space:]]+$//; /^$/d' | sed 's/,/\\/g' | tr -d '\n')
fi
[[ $DEBUG -ge 4 ]] && echo -e "\n\"$data\""
printf -- "$data" >&5 2>/dev/null &
sleep $2
}
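# The two lookup helpers below translate between OpenSSL and RFC cipher suite names via the
# TLS_CIPHER_* arrays, e.g. (illustrative) openssl2rfc "AES128-SHA" --> "TLS_RSA_WITH_AES_128_CBC_SHA",
# with rfc2openssl doing the reverse lookup.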
openssl2rfc() {
local rfcname=""
local -i i

for ((i=0; i < TLS_NR_CIPHERS; i++)); do
[[ "$1" == "${TLS_CIPHER_OSSL_NAME[i]}" ]] && rfcname="${TLS_CIPHER_RFC_NAME[i]}" && break
done
[[ "$rfcname" == "-" ]] && rfcname=""
[[ -n "$rfcname" ]] && tm_out "$rfcname"
return 0
}
rfc2openssl() {
local ossl_name
local -i i
for ((i=0; i < TLS_NR_CIPHERS; i++)); do
[[ "$1" == "${TLS_CIPHER_RFC_NAME[i]}" ]] && ossl_name="${TLS_CIPHER_OSSL_NAME[i]}" && break
done
[[ "$ossl_name" == "-" ]] && ossl_name=""
[[ -n "$ossl_name" ]] && tm_out "$ossl_name"
return 0
}
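# The hexcode lookups below return the "0xXX,0xXX" notation used in the cipher mapping tables,
# e.g. (illustrative) openssl2hexcode "AES128-SHA" and rfc2hexcode "TLS_RSA_WITH_AES_128_CBC_SHA" --> "0x00,0x2F".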
openssl2hexcode() {
local hexc=""
local -i i
if [[ $TLS_NR_CIPHERS -eq 0 ]]; then
hexc="$($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL:@STRENGTH' | awk '/ '"$1"' / { print $1 }')"
else
for ((i=0; i < TLS_NR_CIPHERS; i++)); do
[[ "$1" == "${TLS_CIPHER_OSSL_NAME[i]}" ]] && hexc="${TLS_CIPHER_HEXCODE[i]}" && break
done
fi
[[ -z "$hexc" ]] && return 1
tm_out "$hexc"
return 0
}
rfc2hexcode() {
local hexc=""
local -i i
for ((i=0; i < TLS_NR_CIPHERS; i++)); do
[[ "$1" == "${TLS_CIPHER_RFC_NAME[i]}" ]] && hexc="${TLS_CIPHER_HEXCODE[i]}" && break
done
[[ -z "$hexc" ]] && return 1
tm_out "$hexc"
return 0
}
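# show_rfc_style accepts the condensed "x.." codes used throughout this script,
# e.g. (illustrative) "x05", "xc030" or a 3-byte SSLv2 code like "x0700c0", and maps them to the RFC name.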
show_rfc_style() {
local rfcname="" hexcode
local -i i

hexcode="$(toupper "$1")"
case ${#hexcode} in
3) hexcode="0x00,0x${hexcode:1:2}" ;;
5) hexcode="0x${hexcode:1:2},0x${hexcode:3:2}" ;;
7) hexcode="0x${hexcode:1:2},0x${hexcode:3:2},0x${hexcode:5:2}" ;;
*) return 1 ;;
esac
for ((i=0; i < TLS_NR_CIPHERS; i++)); do
[[ "$hexcode" == "${TLS_CIPHER_HEXCODE[i]}" ]] && rfcname="${TLS_CIPHER_RFC_NAME[i]}" && break
done
[[ "$rfcname" == "-" ]] && rfcname=""
[[ -n "$rfcname" ]] && tm_out "$rfcname"
2015-09-17 15:30:15 +02:00
return 0
2015-05-17 22:43:53 +02:00
}
neat_header() {
if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then
out "$(printf -- "Hexcode Cipher Suite Name (RFC) KeyExch. Encryption Bits")"
[[ "$DISPLAY_CIPHERNAMES" != "rfc-only" ]] && out "$(printf -- " Cipher Suite Name (OpenSSL)")"
outln
out "$(printf -- "%s------------------------------------------------------------------------------------------")"
[[ "$DISPLAY_CIPHERNAMES" != "rfc-only" ]] && out "$(printf -- "---------------------------------------")"
outln
else
out "$(printf -- "Hexcode Cipher Suite Name (OpenSSL) KeyExch. Encryption Bits")"
[[ "$DISPLAY_CIPHERNAMES" != "openssl-only" ]] && out "$(printf -- " Cipher Suite Name (RFC)")"
outln
out "$(printf -- "%s--------------------------------------------------------------------------")"
[[ "$DISPLAY_CIPHERNAMES" != "openssl-only" ]] && out "$(printf -- "---------------------------------------------------")"
outln
fi
}
# arg1: hexcode
# arg2: cipher in openssl notation
# arg3: keyexchange
# arg4: encryption (may include "export")
# arg5: "true" if the cipher's "quality" should be highlighted
# "false" if the line should be printed in light grey
# empty if line should be returned as a string
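# e.g. (illustrative values): neat_list "xc030" "ECDHE-RSA-AES256-GCM-SHA384" "Kx=ECDH 521" "Enc=AESGCM(256)" "true"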
neat_list() {
local hexcode="$1"
local ossl_cipher="$2" tls_cipher=""
local kx enc strength line what_dh bits
local -i i len

kx="${3//Kx=/}"
enc="${4//Enc=/}"
# In two cases LibreSSL uses very long names for encryption algorithms
# and doesn't include the number of bits.
[[ "$enc" == "ChaCha20-Poly1305" ]] && enc="CHACHA20(256)"
[[ "$enc" == "GOST-28178-89-CNT" ]] && enc="GOST(256)"
strength="${enc//\)/}"                   # retrieve (). first remove trailing ")"
strength="${strength#*\(}"               # exfiltrate (VAL
enc="${enc%%\(*}"
enc="${enc//POLY1305/}"                  # remove POLY1305
enc="${enc//\//}"                        # remove "/"
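# (Worked example, illustrative: "Enc=AESGCM(256)" ends up above as enc="AESGCM" and strength="256".)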
grep -iq export <<< "$export" && strength="$strength,exp"

[[ "$DISPLAY_CIPHERNAMES" != "openssl-only" ]] && tls_cipher="$(show_rfc_style "$hexcode")"

if [[ "$5" != "true" ]]; then
if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then
line="$(printf -- " %-7s %-49s %-10s %-12s%-8s" "$hexcode" "$tls_cipher" "$kx" "$enc" "$strength")"
[[ "$DISPLAY_CIPHERNAMES" != "rfc-only" ]] && line+="$(printf -- " %-33s ${SHOW_EACH_C:+ %-0s}" "$ossl_cipher")"
else
line="$(printf -- " %-7s %-33s %-10s %-12s%-8s" "$hexcode" "$ossl_cipher" "$kx" "$enc" "$strength")"
[[ "$DISPLAY_CIPHERNAMES" != "openssl-only" ]] && line+="$(printf -- " %-49s ${SHOW_EACH_C:+ %-0s}" "$tls_cipher")"
fi
if [[ -z "$5" ]]; then
tm_out "$line"
else
pr_deemphasize "$line"
fi
return 0
fi
if [[ "$kx" =~ " " ]]; then
what_dh="${kx%% *}"
bits="${kx##* }"
else
what_dh="$kx"
bits=""
fi
len=${#kx}
if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]]; then
out "$(printf -- " %-7s %-49s " "$hexcode" "$tls_cipher")"
else
out "$(printf -- " %-7s %-33s " "$hexcode" "$ossl_cipher")"
fi
out "$what_dh"
if [[ -n "$bits" ]]; then
if [[ $what_dh == "DH" ]] || [[ $what_dh == "EDH" ]]; then
pr_dh_quality "$bits" " $bits "
elif [[ $what_dh == "ECDH" ]]; then
pr_ecdh_quality "$bits" " $bits "
fi
fi
for ((i=len; i<10; i++)); do
out " "
done
out "$(printf -- " %-12s%-8s" "$enc" "$strength")"
if [[ "$DISPLAY_CIPHERNAMES" == rfc ]]; then
out "$(printf -- " %-33s ${SHOW_EACH_C:+ %-0s}" "$ossl_cipher")"
elif [[ "$DISPLAY_CIPHERNAMES" == openssl ]]; then
out "$(printf -- " %-49s ${SHOW_EACH_C:+ %-0s}" "$tls_cipher")"
fi
}
run_cipher_match() {
local hexc n auth export ciphers_to_test supported_sslv2_ciphers s
local -a hexcode normalized_hexcode ciph sslvers kx enc export2 sigalg
local -a ciphers_found ciphers_found2 ciph2 rfc_ciph rfc_ciph2 ossl_supported
local -a -i index
local -i nr_ciphers=0 nr_ossl_ciphers=0 nr_nonossl_ciphers=0
local -i num_bundles mod_check bundle_size bundle end_of_bundle
local dhlen has_dh_bits="$HAS_DH_BITS"
local cipher proto protos_to_try
local available
local -i sclient_success
local re='^[0-9A-Fa-f]+$'
local using_sockets=true

"$SSL_NATIVE" && using_sockets=false
"$FAST" && using_sockets=false
[[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false
pr_headline " Testing ciphers with "
if [[ $1 =~ $re ]]; then
pr_headline " matching number pattern \"$1\" "
tjolines="$tjolines matching number pattern \"$1\"\n\n"
else
pr_headline " word pattern "\"$1\"" (ignore case) "
tjolines="$tjolines word pattern \"$1\" (ignore case)\n\n"
fi
outln
if ! "$using_sockets"; then
[[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && pr_warning " Cipher mapping not available, doing a fallback to openssl"
if ! "$HAS_DH_BITS"; then
[[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && out "."
prln_warning " (Your $OPENSSL cannot show DH/ECDH bits)"
fi
fi
outln
neat_header
#for arg in $(echo $@ | sed 's/,/ /g'); do
for arg in ${*//,/ }; do
if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then
for ((i=0; i < TLS_NR_CIPHERS; i++)); do
hexc="${TLS_CIPHER_HEXCODE[i]}"
if [[ ${#hexc} -eq 9 ]]; then
hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2}"
if [[ "${hexc:2:2}" == "00" ]]; then
normalized_hexcode[nr_ciphers]="x${hexc:7:2}"
else
normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}"
fi
else
hexc="$(tolower "$hexc")"
hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2},${hexc:12:2}"
normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}${hexc:12:2}"
fi
if [[ $arg =~ $re ]]; then
neat_list "${normalized_hexcode[nr_ciphers]}" "${TLS_CIPHER_OSSL_NAME[i]}" "${TLS_CIPHER_KX[i]}" "${TLS_CIPHER_ENC[i]}" | grep -qai "$arg"
else
neat_list "${normalized_hexcode[nr_ciphers]}" "${TLS_CIPHER_OSSL_NAME[i]}" "${TLS_CIPHER_KX[i]}" "${TLS_CIPHER_ENC[i]}" | grep -qwai "$arg"
fi
if [[ $? -eq 0 ]] && ( "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}" ); then      # string matches, so we can ssl to it:
normalized_hexcode[nr_ciphers]="$(tolower "${normalized_hexcode[nr_ciphers]}")"
ciph[nr_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}"
rfc_ciph[nr_ciphers]="${TLS_CIPHER_RFC_NAME[i]}"
kx[nr_ciphers]="${TLS_CIPHER_KX[i]}"
enc[nr_ciphers]="${TLS_CIPHER_ENC[i]}"
sslvers[nr_ciphers]="${TLS_CIPHER_SSLVERS[i]}"
export2[nr_ciphers]="${TLS_CIPHER_EXPORT[i]}"
ciphers_found[nr_ciphers]=false
sigalg[nr_ciphers]=""
ossl_supported[nr_ciphers]="${TLS_CIPHER_OSSL_SUPPORTED[i]}"
if "$using_sockets" && ! "$has_dh_bits" && \
( [[ ${kx[nr_ciphers]} == "Kx=ECDH" ]] || [[ ${kx[nr_ciphers]} == "Kx=DH" ]] || [[ ${kx[nr_ciphers]} == "Kx=EDH" ]] ); then
ossl_supported[nr_ciphers]=false
fi
nr_ciphers+=1
fi
done
else
while read hexc n ciph[nr_ciphers] sslvers[nr_ciphers] kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do
hexc="$(normalize_ciphercode $hexc)"
# is argument a number?
if [[ $arg =~ $re ]]; then
neat_list "$hexc" "${ciph[nr_ciphers]}" "${kx[nr_ciphers]}" "${enc[nr_ciphers]}" | grep -qai "$arg"
else
neat_list "$hexc" "${ciph[nr_ciphers]}" "${kx[nr_ciphers]}" "${enc[nr_ciphers]}" | grep -qwai "$arg"
fi
if [[ $? -eq 0 ]]; then      # string matches, so we can ssl to it:
ciphers_found[nr_ciphers]=false
normalized_hexcode[nr_ciphers]="$hexc"
sigalg[nr_ciphers]=""
ossl_supported[nr_ciphers]=true
nr_ciphers+=1
fi
done < <($OPENSSL ciphers $ossl_ciphers_proto -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>>$ERRFILE)
fi
# Test the SSLv2 ciphers, if any.
if "$using_sockets"; then
ciphers_to_test=""
for ((i=0; i < nr_ciphers; i++)); do
if [[ "${sslvers[i]}" == "SSLv2" ]]; then
ciphers_to_test+=", ${hexcode[i]}"
fi
done
if [[ -n "$ciphers_to_test" ]]; then
sslv2_sockets "${ciphers_to_test:2}" "true"
if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
"$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")"
for ((i=0; i<nr_ciphers; i++)); do
if [[ "${sslvers[i]}" == "SSLv2" ]] && [[ "$supported_sslv2_ciphers" =~ ${normalized_hexcode[i]} ]]; then
ciphers_found[i]=true
"$SHOW_SIGALGO" && sigalg[i]="$s"
fi
done
fi
fi
else
ciphers_to_test=""
for ((i=0; i < nr_ciphers; i++)); do
if [[ "${sslvers[i]}" == "SSLv2" ]]; then
ciphers_to_test+=":${ciph[i]}"
fi
done
if [[ -n "$ciphers_to_test" ]]; then
$OPENSSL s_client -cipher "${ciphers_to_test:1}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY -ssl2 >$TMPFILE 2>$ERRFILE </dev/null
sclient_connect_successful "$?" "$TMPFILE"
if [[ "$?" -eq 0 ]]; then
supported_sslv2_ciphers="$(grep -A 4 "Ciphers common between both SSL endpoints:" $TMPFILE)"
"$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$TMPFILE")"
for ((i=0; i<nr_ciphers; i++)); do
if [[ "${sslvers[i]}" == "SSLv2" ]] && [[ "$supported_sslv2_ciphers" =~ ${ciph[i]} ]]; then
ciphers_found[i]=true
"$SHOW_SIGALGO" && sigalg[i]="$s"
fi
done
fi
fi
fi
for ((i=0; i < nr_ciphers; i++)); do
if "${ossl_supported[i]}" && [[ "${sslvers[i]}" != "SSLv2" ]]; then
ciphers_found2[nr_ossl_ciphers]=false
ciph2[nr_ossl_ciphers]="${ciph[i]}"
index[nr_ossl_ciphers]=$i
nr_ossl_ciphers+=1
fi
done
if [[ $nr_ossl_ciphers -eq 0 ]]; then
num_bundles=0
else
# Some servers can't handle a handshake with >= 128 ciphers. So,
# test cipher suites in bundles of 128 or less.
num_bundles=$nr_ossl_ciphers/128
mod_check=$nr_ossl_ciphers%128
[[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1
bundle_size=$nr_ossl_ciphers/$num_bundles
mod_check=$nr_ossl_ciphers%$num_bundles
[[ $mod_check -ne 0 ]] && bundle_size+=1
fi
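# (Worked example, illustrative: 197 OpenSSL ciphers give num_bundles=2 and bundle_size=99,
# i.e. one bundle of 99 and one of 98 ciphers.)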
if "$HAS_TLS13"; then
protos_to_try="-no_ssl2 -tls1_2 -tls1_1 -tls1"
else
protos_to_try="-no_ssl2 -tls1_1 -tls1"
fi
"$HAS_SSL3" && protos_to_try+=" -ssl3"
for proto in $protos_to_try; do
if [[ "$proto" == "-tls1_1" ]]; then
num_bundles=1
bundle_size=$nr_ossl_ciphers
fi
for ((bundle=0; bundle < num_bundles; bundle++)); do
end_of_bundle=$bundle*$bundle_size+$bundle_size
[[ $end_of_bundle -gt $nr_ossl_ciphers ]] && end_of_bundle=$nr_ossl_ciphers
while true; do
ciphers_to_test=""
for ((i=bundle*bundle_size; i < end_of_bundle; i++)); do
! "${ciphers_found2[i]}" && ciphers_to_test+=":${ciph2[i]}"
done
[[ -z "$ciphers_to_test" ]] && break
$OPENSSL s_client $(s_client_options "$proto -cipher "${ciphers_to_test:1}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
sclient_connect_successful "$?" "$TMPFILE" || break
cipher=$(get_cipher $TMPFILE)
[[ -z "$cipher" ]] && break
for ((i=bundle*bundle_size; i < end_of_bundle; i++)); do
[[ "$cipher" == "${ciph2[i]}" ]] && ciphers_found2[i]=true && break
done
[[ $i -eq $end_of_bundle ]] && break
i=${index[i]}
ciphers_found[i]=true
[[ "$cipher" == TLS13* ]] && kx[i]="$(read_dhtype_from_file $TMPFILE)"
if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then
dhlen=$(read_dhbits_from_file "$TMPFILE" quiet)
kx[i]="${kx[i]} $dhlen"
fi
"$SHOW_SIGALGO" && grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TMPFILE && \
sigalg[i]="$(read_sigalg_from_file "$TMPFILE")"
done
done
done
if "$using_sockets"; then
for ((i=0; i < nr_ciphers; i++)); do
if ! "${ciphers_found[i]}" && [[ "${sslvers[i]}" != "SSLv2" ]]; then
ciphers_found2[nr_nonossl_ciphers]=false
hexcode2[nr_nonossl_ciphers]="${hexcode[i]}"
rfc_ciph2[nr_nonossl_ciphers]="${rfc_ciph[i]}"
index[nr_nonossl_ciphers]=$i
nr_nonossl_ciphers+=1
fi
done
fi
if [[ $nr_nonossl_ciphers -eq 0 ]]; then
num_bundles=0
else
# Some servers can't handle a handshake with >= 128 ciphers. So,
# test cipher suites in bundles of 128 or less.
num_bundles=$nr_nonossl_ciphers/128
mod_check=$nr_nonossl_ciphers%128
[[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1
bundle_size=$nr_nonossl_ciphers/$num_bundles
mod_check=$nr_nonossl_ciphers%$num_bundles
[[ $mod_check -ne 0 ]] && bundle_size+=1
fi
for proto in 04 03 02 01 00; do
for ((bundle=0; bundle < num_bundles; bundle++)); do
end_of_bundle=$bundle*$bundle_size+$bundle_size
[[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers
while true; do
ciphers_to_test=""
for ((i=bundle*bundle_size; i < end_of_bundle; i++)); do
! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode2[i]}"
done
[[ -z "$ciphers_to_test" ]] && break
[[ "$proto" == "04" ]] && [[ ! "${ciphers_to_test:2}" =~ ,\ 13,[0-9a-f][0-9a-f] ]] && break
ciphers_to_test="$(strip_inconsistent_ciphers "$proto" "$ciphers_to_test")"
[[ -z "$ciphers_to_test" ]] && break
if "$SHOW_SIGALGO"; then
tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "all"
else
tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "ephemeralkey"
fi
sclient_success=$?
[[ $sclient_success -ne 0 ]] && [[ $sclient_success -ne 2 ]] && break
cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
for ((i=bundle*bundle_size; i < end_of_bundle; i++)); do
[[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break
done
[[ $i -eq $end_of_bundle ]] && break
i=${index[i]}
ciphers_found[i]=true
[[ "${kx[i]}" == "Kx=any" ]] && kx[i]="$(read_dhtype_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")"
if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then
dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet)
kx[i]="${kx[i]} $dhlen"
fi
"$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && \
sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")"
done
done
done
for ((i=0; i < nr_ciphers; i++)); do
"${ciphers_found[i]}" || "$SHOW_EACH_C" || continue
export="${export2[i]}"
neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}" "${ciphers_found[i]}"
available=""
if "$SHOW_EACH_C"; then
if "${ciphers_found[i]}"; then
available="available"
pr_cyan "available"
else
available="not a/v"
pr_deemphasize "not a/v"
fi
fi
outln "${sigalg[i]}"
fileout "cipher_${normalized_hexcode[i]}" "INFO" "$(neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}") $available"
done
"$using_sockets" && HAS_DH_BITS="$has_dh_bits"
tmpfile_handle $FUNCNAME.txt
time_right_align run_cipher_match
fileout_section_footer true
outln
END_TIME=$(date +%s)
SCAN_TIME=$((END_TIME - START_TIME))
datebanner " Done"
"$MEASURE_TIME" && printf "%${COLUMNS}s\n" "$SCAN_TIME"
[[ -e "$MEASURE_TIME_FILE" ]] && echo "Total : $SCAN_TIME" >> "$MEASURE_TIME_FILE"
exit
done
outln
tmpfile_handle $FUNCNAME.txt
return 0       # this is a single test for a cipher
}
# test for all ciphers locally configured (w/o distinguishing whether they are good or bad)
run_allciphers() {
local -i nr_ciphers_tested=0 nr_ciphers=0 nr_ossl_ciphers=0 nr_nonossl_ciphers=0 ret
local n auth mac export hexc sslv2_ciphers="" s
local -a normalized_hexcode hexcode ciph sslvers kx enc export2 sigalg ossl_supported
local -i i end_of_bundle bundle bundle_size num_bundles mod_check
local -a ciphers_found ciphers_found2 hexcode2 ciph2 sslvers2 rfc_ciph2
local -i -a index
local proto protos_to_try
local dhlen available ciphers_to_test supported_sslv2_ciphers
local has_dh_bits="$HAS_DH_BITS"
local using_sockets=true

"$SSL_NATIVE" && using_sockets=false
"$FAST" && using_sockets=false
[[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false
# get a list of all the cipher suites to test
if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then
for ((i=0; i < TLS_NR_CIPHERS; i++)); do
hexc="$(tolower "${TLS_CIPHER_HEXCODE[i]}")"
ciph[i]="${TLS_CIPHER_OSSL_NAME[i]}"
sslvers[i]="${TLS_CIPHER_SSLVERS[i]}"
kx[i]="${TLS_CIPHER_KX[i]}"
enc[i]="${TLS_CIPHER_ENC[i]}"
export2[i]="${TLS_CIPHER_EXPORT[i]}"
ciphers_found[i]=false
sigalg[i]=""
ossl_supported[i]=${TLS_CIPHER_OSSL_SUPPORTED[i]}
if "$using_sockets" && ! "$HAS_DH_BITS" && ( [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]] ); then
ossl_supported[i]=false
fi
if [[ ${#hexc} -eq 9 ]]; then
hexcode[i]="${hexc:2:2},${hexc:7:2}"
if [[ "${hexc:2:2}" == "00" ]]; then
normalized_hexcode[i]="x${hexc:7:2}"
else
normalized_hexcode[i]="x${hexc:2:2}${hexc:7:2}"
fi
else
hexcode[i]="${hexc:2:2},${hexc:7:2},${hexc:12:2}"
normalized_hexcode[i]="x${hexc:2:2}${hexc:7:2}${hexc:12:2}"
sslv2_ciphers="$sslv2_ciphers, ${hexcode[i]}"
fi
if "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}"; then
nr_ciphers_tested+=1
fi
done
nr_ciphers=$TLS_NR_CIPHERS
else
while read hexc n ciph[nr_ciphers] sslvers[nr_ciphers] kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do
ciphers_found[nr_ciphers]=false
if [[ ${#hexc} -eq 9 ]]; then
if [[ "${hexc:2:2}" == "00" ]]; then
normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:7:2}")"
else
normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:2:2}${hexc:7:2}")"
fi
else
normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:2:2}${hexc:7:2}${hexc:12:2}")"
fi
sigalg[nr_ciphers]=""
ossl_supported[nr_ciphers]=true
nr_ciphers=$nr_ciphers+1
done < <($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>>$ERRFILE)
nr_ciphers_tested=$nr_ciphers
fi
if "$using_sockets"; then
sslv2_sockets "${sslv2_ciphers:2}" "true"
if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
"$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")"
for ((i=0; i<nr_ciphers; i++)); do
if [[ "${sslvers[i]}" == "SSLv2" ]] && [[ "$supported_sslv2_ciphers" =~ ${normalized_hexcode[i]} ]]; then
ciphers_found[i]=true
"$SHOW_SIGALGO" && sigalg[i]="$s"
fi
done
fi
elif "$HAS_SSL2"; then
$OPENSSL s_client $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY -ssl2 >$TMPFILE 2>$ERRFILE </dev/null
sclient_connect_successful "$?" "$TMPFILE"
if [[ "$?" -eq 0 ]]; then
supported_sslv2_ciphers="$(grep -A 4 "Ciphers common between both SSL endpoints:" $TMPFILE)"
"$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$TMPFILE")"
               for (( i=0 ; i<nr_ciphers; i++ )); do
                    if [[ "${sslvers[i]}" == "SSLv2" ]] && [[ "$supported_sslv2_ciphers" =~ ${ciph[i]} ]]; then
                         ciphers_found[i]=true
                         "$SHOW_SIGALGO" && sigalg[i]="$s"
                    fi
               done
          fi
     fi

     outln
     if "$using_sockets"; then
          pr_headlineln " Testing $nr_ciphers_tested ciphers via OpenSSL plus sockets against the server, ordered by encryption strength "
     else
          pr_headlineln " Testing all $nr_ciphers_tested locally available ciphers against the server, ordered by encryption strength "
          [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && pr_warning " Cipher mapping not available, doing a fallback to openssl"
          outln
          if ! "$HAS_DH_BITS"; then
               [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && out "."
               prln_warning " Your $OPENSSL cannot show DH/ECDH bits "
          fi
     fi
     outln
     neat_header
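
     # First pass: collect the ciphers the local openssl supports into the parallel
     # ciph2/ciphers_found2 arrays, remembering in index[] where each entry sits in the
     # full cipher list.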
     for (( i=0; i < nr_ciphers; i++ )); do
          if "${ossl_supported[i]}"; then
               [[ "${sslvers[i]}" == "SSLv2" ]] && continue
               ciphers_found2[nr_ossl_ciphers]=false
               sslvers2[nr_ossl_ciphers]="${sslvers[i]}"
               ciph2[nr_ossl_ciphers]="${ciph[i]}"
               index[nr_ossl_ciphers]=$i
               nr_ossl_ciphers+=1
          fi
     done
     if [[ $nr_ossl_ciphers -eq 0 ]]; then
          num_bundles=0
     else
          # Some servers can't handle a handshake with >= 128 ciphers. So,
          # test cipher suites in bundles of 128 or less.
          num_bundles=$nr_ossl_ciphers/128
          mod_check=$nr_ossl_ciphers%128
          [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1
          bundle_size=$nr_ossl_ciphers/$num_bundles
          mod_check=$nr_ossl_ciphers%$num_bundles
          [[ $mod_check -ne 0 ]] && bundle_size+=1
     fi
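
     # OpenSSL phase: try -no_ssl2 first (client and server negotiate the highest version),
     # then explicit older protocol flags so that ciphers a server only enables for older
     # protocol versions are caught as well; protocols the server is known not to speak
     # are skipped via has_server_protocol().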
     if "$HAS_TLS13"; then
          protos_to_try="-no_ssl2 -tls1_2 -tls1_1 -tls1"
     else
          protos_to_try="-no_ssl2 -tls1_1 -tls1"
     fi
     "$HAS_SSL3" && protos_to_try+=" -ssl3"

     for proto in $protos_to_try; do
          if [[ "$proto" == "-tls1_1" ]]; then
               num_bundles=1
               bundle_size=$nr_ossl_ciphers
          fi
          [[ "$proto" != "-no_ssl2" ]] && [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue
          for (( bundle=0; bundle < num_bundles; bundle++ )); do
               end_of_bundle=$bundle*$bundle_size+$bundle_size
               [[ $end_of_bundle -gt $nr_ossl_ciphers ]] && end_of_bundle=$nr_ossl_ciphers
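               # Offer all not-yet-found ciphers of this bundle at once and let the server pick
               # one; mark the chosen cipher as found, drop it from the next offer and repeat
               # until the handshake fails. The number of s_client calls thus stays close to
               # the number of ciphers the server actually supports.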
               while true; do
                    ciphers_to_test=""
                    for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                         ! "${ciphers_found2[i]}" && ciphers_to_test+=":${ciph2[i]}"
                    done
                    [[ -z "$ciphers_to_test" ]] && break
                    $OPENSSL s_client $(s_client_options "$proto -cipher "${ciphers_to_test:1}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
                    sclient_connect_successful "$?" "$TMPFILE" || break
                    cipher=$(get_cipher $TMPFILE)
                    [[ -z "$cipher" ]] && break
                    for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                         [[ "$cipher" == "${ciph2[i]}" ]] && ciphers_found2[i]=true && break
                    done
                    [[ $i -eq $end_of_bundle ]] && break
                    i=${index[i]}
                    ciphers_found[i]=true
                    [[ "$cipher" == TLS13* ]] && kx[i]="$(read_dhtype_from_file $TMPFILE)"
                    if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then
                         dhlen=$(read_dhbits_from_file "$TMPFILE" quiet)
                         kx[i]="${kx[i]} $dhlen"
                    fi
                    "$SHOW_SIGALGO" && grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TMPFILE && \
                         sigalg[i]="$(read_sigalg_from_file "$TMPFILE")"
               done
          done
     done
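
     # Socket phase: everything not found via OpenSSL so far (including ciphers the local
     # OpenSSL doesn't know at all) is now retried with tls_sockets(), again per protocol
     # and in bundles.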
     if "$using_sockets"; then
          for (( i=0; i < nr_ciphers; i++ )); do
               if ! "${ciphers_found[i]}"; then
                    [[ "${sslvers[i]}" == "SSLv2" ]] && continue
                    ciphers_found2[nr_nonossl_ciphers]=false
                    sslvers2[nr_nonossl_ciphers]="${sslvers[i]}"
                    hexcode2[nr_nonossl_ciphers]="${hexcode[i]}"
                    rfc_ciph2[nr_nonossl_ciphers]="${TLS_CIPHER_RFC_NAME[i]}"
                    index[nr_nonossl_ciphers]=$i
                    nr_nonossl_ciphers+=1
               fi
          done
     fi

     if [[ $nr_nonossl_ciphers -eq 0 ]]; then
          num_bundles=0
     else
          # Some servers can't handle a handshake with >= 128 ciphers. So,
          # test cipher suites in bundles of 128 or less.
          num_bundles=$nr_nonossl_ciphers/128
          mod_check=$nr_nonossl_ciphers%128
          [[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1
          bundle_size=$nr_nonossl_ciphers/$num_bundles
          mod_check=$nr_nonossl_ciphers%$num_bundles
          [[ $mod_check -ne 0 ]] && bundle_size+=1
     fi
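
     # The socket checks take the protocol as the hex minor version of the record layer:
     # 04=TLS 1.3, 03=TLS 1.2, 02=TLS 1.1, 01=TLS 1.0, 00=SSLv3.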
     for proto in 04 03 02 01 00; do
          for (( bundle=0; bundle < num_bundles; bundle++ )); do
               end_of_bundle=$bundle*$bundle_size+$bundle_size
               [[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers
               while true; do
                    ciphers_to_test=""
                    for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                         ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode2[i]}"
                    done
                    [[ -z "$ciphers_to_test" ]] && break
                    [[ "$proto" == "04" ]] && [[ ! "${ciphers_to_test:2}" =~ ,\ 13,[0-9a-f][0-9a-f] ]] && break
                    ciphers_to_test="$(strip_inconsistent_ciphers "$proto" "$ciphers_to_test")"
                    [[ -z "$ciphers_to_test" ]] && break
                    if "$SHOW_SIGALGO"; then
                         tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "all"
                    else
                         tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "ephemeralkey"
                    fi
                    ret=$?
                    [[ $ret -ne 0 ]] && [[ $ret -ne 2 ]] && break
                    cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
                    for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                         [[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break
                    done
                    [[ $i -eq $end_of_bundle ]] && break
                    i=${index[i]}
                    ciphers_found[i]=true
                    [[ "${kx[i]}" == "Kx=any" ]] && kx[i]="$(read_dhtype_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")"
                    if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then
                         dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet)
                         kx[i]="${kx[i]} $dhlen"
                    fi
                    "$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")"
               done
          done
     done

     for (( i=0 ; i<nr_ciphers; i++ )); do
          if "${ciphers_found[i]}" || ( "$SHOW_EACH_C" && ( "$using_sockets" || "${ossl_supported[i]}" ) ); then
               export=${export2[i]}
               neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}" "${ciphers_found[i]}"
               available=""
               if "$SHOW_EACH_C"; then
                    if ${ciphers_found[i]}; then
                         available="available"
                         pr_cyan "$available"
                    else
                         available="not a/v"
                         pr_deemphasize "$available"
                    fi
               fi
               outln "${sigalg[i]}"
               fileout "cipher_${normalized_hexcode[i]}" "INFO" "$(neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}") $available"
          fi
     done

     "$using_sockets" && HAS_DH_BITS="$has_dh_bits"

     outln
     return 0
}

# test for all ciphers per protocol locally configured (w/o distinguishing whether they are good or bad)
run_cipher_per_proto() {
     local proto proto_hex proto_text ossl_ciphers_proto
     local -i nr_ciphers nr_ossl_ciphers nr_nonossl_ciphers success
     local n sslvers auth mac export hexc sslv2_ciphers="" cipher
     local -a hexcode normalized_hexcode ciph rfc_ciph kx enc export2
     local -a hexcode2 ciph2 rfc_ciph2
     local -i i bundle end_of_bundle bundle_size num_bundles mod_check
     local -a ciphers_found ciphers_found2 sigalg ossl_supported index
     local dhlen supported_sslv2_ciphers ciphers_to_test addcmd temp
     local available
     local id
     local has_dh_bits="$HAS_DH_BITS"
     local using_sockets=true

     "$SSL_NATIVE" && using_sockets=false
     "$FAST" && using_sockets=false
     [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false

     outln
     if "$using_sockets"; then
          pr_headlineln " Testing ciphers per protocol via OpenSSL plus sockets against the server, ordered by encryption strength "
     else
          pr_headlineln " Testing all locally available ciphers per protocol against the server, ordered by encryption strength "
          [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && pr_warning " Cipher mapping not available, doing a fallback to openssl"
          outln
          if ! "$HAS_DH_BITS"; then
               [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && out "."
               prln_warning " (Your $OPENSSL cannot show DH/ECDH bits) "
          fi
     fi
     outln
     neat_header
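
     # One line per protocol below: the openssl flag, the id handed to the socket helpers
     # and the text used in the output.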
     echo -e " -ssl2 22 SSLv2\n -ssl3 00 SSLv3\n -tls1 01 TLS 1\n -tls1_1 02 TLS 1.1\n -tls1_2 03 TLS 1.2\n -tls1_3 04 TLS 1.3" | while read proto proto_hex proto_text; do
          pr_underline "$(printf "%s" "$proto_text")"
          out "  "                 # for local problem if it happens
          if ! "$using_sockets" && ! locally_supported "$proto"; then
               continue
          fi
          outln
          [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue

          # get a list of all the cipher suites to test
          nr_ciphers=0
          if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then
               for (( i=0; i < TLS_NR_CIPHERS; i++ )); do
                    hexc="${TLS_CIPHER_HEXCODE[i]}"
                    ciph[nr_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}"
                    rfc_ciph[nr_ciphers]="${TLS_CIPHER_RFC_NAME[i]}"
                    kx[nr_ciphers]="${TLS_CIPHER_KX[i]}"
                    enc[nr_ciphers]="${TLS_CIPHER_ENC[i]}"
                    export2[nr_ciphers]="${TLS_CIPHER_EXPORT[i]}"
                    ciphers_found[nr_ciphers]=false
                    sigalg[nr_ciphers]=""
                    ossl_supported[nr_ciphers]=${TLS_CIPHER_OSSL_SUPPORTED[i]}
                    if "$using_sockets" && ! "$has_dh_bits" && ( [[ ${kx[nr_ciphers]} == "Kx=ECDH" ]] || [[ ${kx[nr_ciphers]} == "Kx=DH" ]] || [[ ${kx[nr_ciphers]} == "Kx=EDH" ]] ); then
                         ossl_supported[nr_ciphers]=false
                    fi
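                    # hexc is "0x00,0x3d" style: 9 chars means a two byte SSLv3/TLS id,
                    # 14 chars a three byte SSLv2 id.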
                    if [[ ${#hexc} -eq 9 ]]; then
                         hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2}"
                         if [[ "${hexc:2:2}" == "00" ]]; then
                              normalized_hexcode[nr_ciphers]="x${hexc:7:2}"
                         else
                              normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}"
                         fi
                    else
                         hexc="$(tolower "$hexc")"
                         hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2},${hexc:12:2}"
                         normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}${hexc:12:2}"
                    fi
                    if ( "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}" ); then
                         if [[ ${#hexc} -eq 9 ]] && [[ "$proto_text" != "SSLv2" ]]; then
                              if [[ "$proto_text" == "TLS 1.3" ]]; then
                                   [[ "${hexc:2:2}" == "13" ]] && nr_ciphers+=1
                              elif [[ "$proto_text" == "TLS 1.2" ]]; then
                                   [[ "${hexc:2:2}" != "13" ]] && nr_ciphers+=1
                              elif [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA256 ]] && [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA384 ]] && \
                                   [[ "${TLS_CIPHER_RFC_NAME[i]}" != *"_CCM" ]] && [[ "${TLS_CIPHER_RFC_NAME[i]}" != *"_CCM_8" ]]; then
                                   nr_ciphers+=1
                              fi
                         elif [[ ${#hexc} -eq 14 ]] && [[ "$proto_text" == "SSLv2" ]]; then
                              sslv2_ciphers+=", ${hexcode[nr_ciphers]}"
                              nr_ciphers+=1
                         fi
                    fi
               done
          else      # no sockets, openssl!
               # The OpenSSL ciphers function, prior to version 1.1.0, could only understand -ssl2, -ssl3, and -tls1.
               if [[ "$proto" == "-ssl2" ]] || [[ "$proto" == "-ssl3" ]] || \
                  [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == "1.1.0"* ]] || [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == "1.1.1"* ]]; then
                    ossl_ciphers_proto="$proto"
               else
                    ossl_ciphers_proto="-tls1"
               fi
               while read hexc n ciph[nr_ciphers] sslvers kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do
                    if [[ "$proto_text" == "TLS 1.3" ]]; then
                         [[ "${ciph[nr_ciphers]}" == TLS13* ]] || continue
                    elif [[ "$proto_text" == "TLS 1.2" ]]; then
                         [[ "${ciph[nr_ciphers]}" != TLS13* ]] || continue
                    elif [[ "${ciph[nr_ciphers]}" == *"-SHA256" ]] || [[ "${ciph[nr_ciphers]}" == *"-SHA384" ]] || \
                         [[ "${ciph[nr_ciphers]}" == *"-CCM" ]] || [[ "${ciph[nr_ciphers]}" == *"-CCM8" ]] || \
                         [[ "${ciph[nr_ciphers]}" =~ CHACHA20-POLY1305 ]]; then
                         continue
                    fi
                    ciphers_found[nr_ciphers]=false
                    normalized_hexcode[nr_ciphers]="$(normalize_ciphercode "$hexc")"
                    sigalg[nr_ciphers]=""
                    ossl_supported[nr_ciphers]=true
                    nr_ciphers+=1
               done < <($OPENSSL ciphers $ossl_ciphers_proto -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>>$ERRFILE)
          fi
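
          # SSLv2 is handled separately: a single sslv2_sockets() call (or one
          # "openssl s_client -ssl2" handshake) already reports all commonly supported
          # SSLv2 ciphers, so no bundling is needed here.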
          if [[ "$proto" == "-ssl2" ]]; then
               if "$using_sockets"; then
                    sslv2_sockets "${sslv2_ciphers:2}" "true"
                    if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
                         supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
                         "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")"
                         for (( i=0 ; i<nr_ciphers; i++ )); do
                              if [[ "$supported_sslv2_ciphers" =~ ${normalized_hexcode[i]} ]]; then
                                   ciphers_found[i]=true
                                   "$SHOW_SIGALGO" && sigalg[i]="$s"
                              fi
                         done
                    fi
               else
                    $OPENSSL s_client $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY -ssl2 >$TMPFILE 2>$ERRFILE </dev/null
                    sclient_connect_successful "$?" "$TMPFILE"
                    if [[ "$?" -eq 0 ]]; then
                         supported_sslv2_ciphers="$(grep -A 4 "Ciphers common between both SSL endpoints:" $TMPFILE)"
                         "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$TMPFILE")"
                         for (( i=0 ; i<nr_ciphers; i++ )); do
                              if [[ "$supported_sslv2_ciphers" =~ ${ciph[i]} ]]; then
                                   ciphers_found[i]=true
                                   "$SHOW_SIGALGO" && sigalg[i]="$s"
                              fi
                         done
                    fi
               fi
2017-04-14 11:26:01 +02:00
else # no SSLv2
2016-11-29 17:16:01 +01:00
nr_ossl_ciphers = 0
for ( ( i = 0; i < nr_ciphers; i++ ) ) ; do
if " ${ ossl_supported [i] } " ; then
ciphers_found2[ nr_ossl_ciphers] = false
ciph2[ nr_ossl_ciphers] = " ${ ciph [i] } "
index[ nr_ossl_ciphers] = $i
nr_ossl_ciphers += 1
fi
done
if [ [ $nr_ossl_ciphers -eq 0 ] ] ; then
num_bundles = 0
else
# Some servers can't handle a handshake with >= 128 ciphers. So,
# test cipher suites in bundles of 128 or less.
num_bundles = $nr_ossl_ciphers /128
mod_check = $nr_ossl_ciphers %128
[ [ $mod_check -ne 0 ] ] && num_bundles = $num_bundles +1
bundle_size = $nr_ossl_ciphers /$num_bundles
mod_check = $nr_ossl_ciphers %$num_bundles
[ [ $mod_check -ne 0 ] ] && bundle_size += 1
fi

               for (( bundle=0; bundle < num_bundles; bundle++ )); do
                    end_of_bundle=$bundle*$bundle_size+$bundle_size
                    [[ $end_of_bundle -gt $nr_ossl_ciphers ]] && end_of_bundle=$nr_ossl_ciphers
                    for (( success=0; success==0 ; 1 )); do
                         ciphers_to_test=""
                         for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                              ! "${ciphers_found2[i]}" && ciphers_to_test+=":${ciph2[i]}"
                         done
                         success=1
                         if [[ -n "$ciphers_to_test" ]]; then
                              $OPENSSL s_client $(s_client_options "-cipher "${ciphers_to_test:1}" $proto $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
                              sclient_connect_successful "$?" "$TMPFILE"
                              if [[ "$?" -eq 0 ]]; then
                                   cipher=$(get_cipher $TMPFILE)
                                   if [[ -n "$cipher" ]]; then
                                        success=0
                                        for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                                             [[ "$cipher" == "${ciph2[i]}" ]] && ciphers_found2[i]=true && break
                                        done
                                        i=${index[i]}
                                        ciphers_found[i]=true
                                        [[ "$proto_text" == "TLS 1.3" ]] && kx[i]="$(read_dhtype_from_file $TMPFILE)"
                                        if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then
                                             dhlen=$(read_dhbits_from_file "$TMPFILE" quiet)
                                             kx[i]="${kx[i]} $dhlen"
                                        fi
                                        "$SHOW_SIGALGO" && grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TMPFILE && \
                                             sigalg[i]="$(read_sigalg_from_file "$TMPFILE")"
                                   fi
                              fi
                         fi
                    done
               done

               if "$using_sockets"; then
                    nr_nonossl_ciphers=0
                    for (( i=0; i < nr_ciphers; i++ )); do
                         if ! "${ciphers_found[i]}"; then
                              ciphers_found2[nr_nonossl_ciphers]=false
                              hexcode2[nr_nonossl_ciphers]="${hexcode[i]}"
                              rfc_ciph2[nr_nonossl_ciphers]="${rfc_ciph[i]}"
                              index[nr_nonossl_ciphers]=$i
                              nr_nonossl_ciphers+=1
                         fi
                    done
               fi
2016-11-29 17:16:01 +01:00
if [ [ $nr_nonossl_ciphers -eq 0 ] ] ; then
num_bundles = 0
else
# Some servers can't handle a handshake with >= 128 ciphers. So,
# test cipher suites in bundles of 128 or less.
num_bundles = $nr_nonossl_ciphers /128
mod_check = $nr_nonossl_ciphers %128
[ [ $mod_check -ne 0 ] ] && num_bundles = $num_bundles +1
bundle_size = $nr_nonossl_ciphers /$num_bundles
mod_check = $nr_nonossl_ciphers %$num_bundles
[ [ $mod_check -ne 0 ] ] && bundle_size += 1
fi
for ( ( bundle = 0; bundle < num_bundles; bundle++ ) ) ; do
end_of_bundle = $bundle *$bundle_size +$bundle_size
[ [ $end_of_bundle -gt $nr_nonossl_ciphers ] ] && end_of_bundle = $nr_nonossl_ciphers
for ( ( success = 0; success = = 0 ; 1 ) ) ; do
ciphers_to_test = ""
for ( ( i = bundle*bundle_size; i < end_of_bundle; i++ ) ) ; do
! " ${ ciphers_found2 [i] } " && ciphers_to_test += " , ${ hexcode2 [i] } "
done
success = 1
if [ [ -n " $ciphers_to_test " ] ] ; then
if " $SHOW_SIGALGO " ; then
tls_sockets " $proto_hex " " ${ ciphers_to_test : 2 } , 00,ff " "all"
else
tls_sockets " $proto_hex " " ${ ciphers_to_test : 2 } , 00,ff " "ephemeralkey"
fi
if [ [ $? -eq 0 ] ] ; then
success = 0
                                   cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
                                   for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
                                        [[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break
                                   done
                                   i=${index[i]}
                                   ciphers_found[i]=true
                                   [[ "$proto_text" == "TLS 1.3" ]] && kx[i]="$(read_dhtype_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")"
                                   if [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]]; then
                                        dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet)
                                        kx[i]="${kx[i]} $dhlen"
                                   fi
                                   "$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && \
                                        sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")"
                              fi
                         fi
                    done
               done
          fi
          for (( i=0 ; i<nr_ciphers; i++ )); do
               if "${ciphers_found[i]}" || "$SHOW_EACH_C"; then
                    export=${export2[i]}
                    normalized_hexcode[i]="$(tolower "${normalized_hexcode[i]}")"
                    neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}" "${ciphers_found[i]}"
                    available=""
                    if "$SHOW_EACH_C"; then
                         if "${ciphers_found[i]}"; then
                              available="available"
                              pr_cyan "$available"
                         else
                              available="not a/v"
                              pr_deemphasize "$available"
                         fi
                    fi
                    outln "${sigalg[i]}"
                    id="cipher$proto"
                    id+="_${normalized_hexcode[i]}"
                    fileout "$id" "INFO" "$proto_text $(neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}") $available"
               fi
          done
     done
     "$using_sockets" && HAS_DH_BITS="$has_dh_bits"

     tmpfile_handle $FUNCNAME.txt
     return 0
}

# arg1 is an ASCII-HEX encoded SSLv3 or TLS ClientHello.
# If the ClientHello contains a server name extension, then
# either:
# 1) replace it with one corresponding to $SNI; or
# 2) remove it, if $SNI is empty
create_client_simulation_tls_clienthello() {
     local tls_handshake_ascii="$1"
     local -i len offset tls_handshake_ascii_len len_all len_clienthello
     local -i len_extensions len_extension
     local tls_content_type tls_version_reclayer handshake_msg_type tls_clientversion
     local tls_random tls_sid tls_cipher_suites tls_compression_methods
     local tls_extensions="" extension_type len_extensions_hex
     local len_servername hexdump_format_str servername_hexstr
     local len_servername_hex len_sni_listlen len_sni_ext
     local tls_client_hello len_clienthello_hex tls_handshake_ascii_len_hex
     local sni_extension_found=false
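
     # Parse the fixed part of the ClientHello; offsets are in hex characters (two per byte):
     # record header (type 1, version 2, length 2 bytes), handshake type (1), handshake
     # length (3), client version (2), random (32), then the variable-length session id,
     # cipher suite list and compression methods.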
     tls_handshake_ascii_len=${#tls_handshake_ascii}
     tls_content_type="${tls_handshake_ascii:0:2}"
     tls_version_reclayer="${tls_handshake_ascii:2:4}"
     len_all=$(hex2dec "${tls_handshake_ascii:6:4}")
     handshake_msg_type="${tls_handshake_ascii:10:2}"
     len_clienthello=$(hex2dec "${tls_handshake_ascii:12:6}")
     tls_clientversion="${tls_handshake_ascii:18:4}"
     tls_random="${tls_handshake_ascii:22:64}"
     len=2*$(hex2dec "${tls_handshake_ascii:86:2}")+2
     tls_sid="${tls_handshake_ascii:86:$len}"
     offset=86+$len
     len=2*$(hex2dec "${tls_handshake_ascii:$offset:4}")+4
     tls_cipher_suites="${tls_handshake_ascii:$offset:$len}"
     offset=$offset+$len
     len=2*$(hex2dec "${tls_handshake_ascii:$offset:2}")+2
     tls_compression_methods="${tls_handshake_ascii:$offset:$len}"
     offset=$offset+$len

     if [[ $offset -ge $tls_handshake_ascii_len ]]; then
          # No extensions
          tm_out "$tls_handshake_ascii"
          return 0
     fi

     len_extensions=2*$(hex2dec "${tls_handshake_ascii:$offset:4}")
     offset=$offset+4
     for (( 1; offset < tls_handshake_ascii_len; 1 )); do
          extension_type="${tls_handshake_ascii:$offset:4}"
          offset=$offset+4
          len_extension=2*$(hex2dec "${tls_handshake_ascii:$offset:4}")
2017-03-24 16:37:06 +01:00
if [ [ " $extension_type " != "0000" ] ] ; then
2016-06-17 22:33:00 +02:00
# The extension will just be copied into the revised ClientHello
offset = $offset -4
len = $len_extension +8
tls_extensions += " ${ tls_handshake_ascii : $offset : $len } "
offset = $offset +$len
2017-03-24 16:37:06 +01:00
else
sni_extension_found = true
if [ [ -n " $SNI " ] ] ; then
# Create a server name extension that corresponds to $SNI
len_servername = ${# NODE }
hexdump_format_str = " $len_servername /1 \"%02x\" "
servername_hexstr = $( printf $NODE | hexdump -v -e " ${ hexdump_format_str } " )
# convert lengths we need to fill in from dec to hex:
len_servername_hex = $( printf "%02x\n" $len_servername )
len_sni_listlen = $( printf "%02x\n" $(( len_servername+3)) )
len_sni_ext = $( printf "%02x\n" $(( len_servername+5)) )
tls_extensions += " 000000 ${ len_sni_ext } 00 ${ len_sni_listlen } 0000 ${ len_servername_hex } ${ servername_hexstr } "
offset = $offset +$len_extension +4
fi
fi
2016-06-17 22:33:00 +02:00
done
if ! $sni_extension_found ; then
2017-02-25 16:31:30 +01:00
tm_out " $tls_handshake_ascii "
2016-06-17 22:33:00 +02:00
return 0
fi
len_extensions = ${# tls_extensions } /2
len_extensions_hex = $( printf "%02x\n" $len_extensions )
len2twobytes " $len_extensions_hex "
tls_extensions = " ${ LEN_STR : 0 : 2 } ${ LEN_STR : 4 : 2 } ${ tls_extensions } "
tls_client_hello = " ${ tls_clientversion } ${ tls_random } ${ tls_sid } ${ tls_cipher_suites } ${ tls_compression_methods } ${ tls_extensions } "
len_clienthello = ${# tls_client_hello } /2
len_clienthello_hex = $( printf "%02x\n" $len_clienthello )
len2twobytes " $len_clienthello_hex "
tls_handshake_ascii = " ${ handshake_msg_type } 00 ${ LEN_STR : 0 : 2 } ${ LEN_STR : 4 : 2 } ${ tls_client_hello } "
tls_handshake_ascii_len = ${# tls_handshake_ascii } /2
tls_handshake_ascii_len_hex = $( printf "%02x\n" $tls_handshake_ascii_len )
len2twobytes " $tls_handshake_ascii_len_hex "
tls_handshake_ascii = " ${ tls_content_type } ${ tls_version_reclayer } ${ LEN_STR : 0 : 2 } ${ LEN_STR : 4 : 2 } ${ tls_handshake_ascii } "
2017-02-25 16:31:30 +01:00
tm_out " $tls_handshake_ascii "
2016-06-17 22:33:00 +02:00
return 0
}
client_simulation_sockets() {
     local -i len i ret=0
     local -i save=0
     local lines clienthello data=""
     local cipher_list_2send=""
     local sock_reply_file2 sock_reply_file3
     local tls_hello_ascii next_packet hello_done=0
     local -i sid_len offset1 offset2

     if [[ "${1:0:4}" == "1603" ]]; then
          clienthello="$(create_client_simulation_tls_clienthello "$1")"
     else
          clienthello="$1"
     fi
     len=${#clienthello}
     for (( i=0; i < len; i=i+2 )); do
          data+=", ${clienthello:i:2}"
     done

     # Same as above. If a CIPHER_SUITES string was provided, then check that the cipher
     # picked by the server in the ServerHello is from that list.
     # This appeared 1st in yassl + MySQL (https://github.com/drwetter/testssl.sh/pull/784) but adds
     # robustness to the implementation
     # see also https://github.com/drwetter/testssl.sh/pull/797
     if [[ "${1:0:4}" == "1603" ]]; then
          # Extract the list of cipher suites from the SSLv3 or later ClientHello
          sid_len=4*$(hex2dec "${data:174:2}")
          offset1=178+$sid_len
          offset2=182+$sid_len
          len=4*$(hex2dec "${data:offset1:2}${data:offset2:2}")-2
          offset1=186+$sid_len
          code2network "$(tolower "${data:offset1:len}")"              # convert CIPHER_SUITES to a "standardized" format
     else
          # Extract the list of cipher suites from the SSLv2 ClientHello
          len=2*$(hex2dec "${clienthello:12:2}")
          for (( i=22; i < 22+len; i=i+6 )); do
               offset1=$i+2
               offset2=$i+4
               [[ "${clienthello:i:2}" == "00" ]] && cipher_list_2send+=", ${clienthello:offset1:2},${clienthello:offset2:2}"
          done
          code2network "$(tolower "${cipher_list_2send:2}")"           # convert CIPHER_SUITES to a "standardized" format
     fi
     cipher_list_2send="$NW_STR"

     debugme echo -e "\nsending client hello... "
     code2network "${data}"
     data="$NW_STR"
     fd_socket 5 || return 6

     [[ "$DEBUG" -ge 4 ]] && echo "\"$data\""
     printf -- "$data" >&5 2>/dev/null &
     sleep $USLEEP_SND

     sockread_serverhello 32768
     tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
     tls_hello_ascii="${tls_hello_ascii%%[!0-9A-F]*}"

     # Check if the response is a HelloRetryRequest.
     resend_if_hello_retry_request "$tls_hello_ascii" "$cipher_list_2send" "$4" "$process_full"
     ret=$?
     if [[ $ret -eq 2 ]]; then
          tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
          tls_hello_ascii="${tls_hello_ascii%%[!0-9A-F]*}"
     elif [[ $ret -eq 1 ]] || [[ $ret -eq 6 ]]; then
          close_socket
          TMPFILE=$SOCK_REPLY_FILE
          tmpfile_handle $FUNCNAME.dd
          return $ret
     fi
if [ [ " ${ tls_hello_ascii : 0 : 1 } " != "8" ] ] ; then
2017-10-03 22:10:09 +02:00
check_tls_serverhellodone " $tls_hello_ascii " "ephemeralkey"
client simulation and SSLv2 servers
The data for `run_client_simulation()` currently includes two clients that send version 2.0 CLIENT-HELLO messages (see Appendix E.2 of RFC 5246). Each of the CLIENT-HELLO messages advertises support for newer protocol versions (SSLv3 in the case of IE6XP and TLSv1.0 in the case of Java 6u45). A server may reject one of these version 2.0 CLIENT-HELLO messages, or it may respond with an SSLv2, SSLv3, or TLSv1.0 ServerHello.
The current code in `client_simulation_sockets()` assumes that the server's response with be an SSLv3 or later ServerHello. So, it can support cases in which servers respond with an SSLv3 or TLSv1.0 ServerHello (once PR #800 is accepted to undo the mistake in PR #797), but not cases in which servers response with an SSLv2 ServerHello.
This PR adds code to `client_simulation_sockets()` to check if the server's response is an SSLv2 ServerHello, so that it can process such responses with `parse_sslv2_serverhello()` rather than `parse_tls_serverhello()`.
When a connection is made using SSLv3 or later, `run_client_simulation()` will show to the protocol and cipher selected for the connection. With this PR, if the connection is made using SSLv2, `run_client_simulation()` will just show "SSLv2." In the case of SSLv2, the ServerHello contains a list of all ciphers that the server and client have in common, and it is up to the client to choose one. So, if the client and server have more than one cipher in common, more information about the client would be needed to know which cipher it would choose.
2017-07-26 20:02:56 +02:00
hello_done = $?
fi

     for (( 1 ; hello_done==1; 1 )); do
          if [[ $DEBUG -ge 1 ]]; then
               sock_reply_file2=${SOCK_REPLY_FILE}.2
               mv "$SOCK_REPLY_FILE" "$sock_reply_file2"
          fi

          debugme echo -n "requesting more server hello data... "
          socksend "" $USLEEP_SND
          sockread_serverhello 32768

          next_packet=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
          next_packet="${next_packet%%[!0-9A-F]*}"
          if [[ ${#next_packet} -eq 0 ]]; then
               # This shouldn't be necessary. However, it protects against
               # getting into an infinite loop if the server has nothing
               # left to send and check_tls_serverhellodone doesn't
               # correctly catch it.
               [[ $DEBUG -ge 1 ]] && mv "$sock_reply_file2" "$SOCK_REPLY_FILE"
               hello_done=0
          else
               tls_hello_ascii+="$next_packet"
               if [[ $DEBUG -ge 1 ]]; then
                    sock_reply_file3=${SOCK_REPLY_FILE}.3
                    mv "$SOCK_REPLY_FILE" "$sock_reply_file3"     #FIXME: we moved that already
                    mv "$sock_reply_file2" "$SOCK_REPLY_FILE"
                    cat "$sock_reply_file3" >> "$SOCK_REPLY_FILE"
                    rm "$sock_reply_file3"
               fi

               check_tls_serverhellodone "$tls_hello_ascii" "ephemeralkey"
               hello_done=$?
          fi
     done

     debugme echo "reading server hello..."
     if [[ "$DEBUG" -ge 4 ]]; then
          hexdump -C $SOCK_REPLY_FILE | head -6
          echo
     fi
if [ [ " ${ tls_hello_ascii : 0 : 1 } " = = "8" ] ] ; then
parse_sslv2_serverhello " $SOCK_REPLY_FILE " "false"
if [ [ $? -eq 3 ] ] && [ [ " $V2_HELLO_CIPHERSPEC_LENGTH " -ne 0 ] ] ; then
echo "Protocol : SSLv2" > " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt "
DETECTED_TLS_VERSION = "0200"
ret = 0
2016-11-04 16:51:34 +01:00
else
client simulation and SSLv2 servers
The data for `run_client_simulation()` currently includes two clients that send version 2.0 CLIENT-HELLO messages (see Appendix E.2 of RFC 5246). Each of the CLIENT-HELLO messages advertises support for newer protocol versions (SSLv3 in the case of IE6XP and TLSv1.0 in the case of Java 6u45). A server may reject one of these version 2.0 CLIENT-HELLO messages, or it may respond with an SSLv2, SSLv3, or TLSv1.0 ServerHello.
The current code in `client_simulation_sockets()` assumes that the server's response with be an SSLv3 or later ServerHello. So, it can support cases in which servers respond with an SSLv3 or TLSv1.0 ServerHello (once PR #800 is accepted to undo the mistake in PR #797), but not cases in which servers response with an SSLv2 ServerHello.
This PR adds code to `client_simulation_sockets()` to check if the server's response is an SSLv2 ServerHello, so that it can process such responses with `parse_sslv2_serverhello()` rather than `parse_tls_serverhello()`.
When a connection is made using SSLv3 or later, `run_client_simulation()` will show to the protocol and cipher selected for the connection. With this PR, if the connection is made using SSLv2, `run_client_simulation()` will just show "SSLv2." In the case of SSLv2, the ServerHello contains a list of all ciphers that the server and client have in common, and it is up to the client to choose one. So, if the client and server have more than one cipher in common, more information about the client would be needed to know which cipher it would choose.
2017-07-26 20:02:56 +02:00
ret = 1
2016-11-04 16:51:34 +01:00
fi
client simulation and SSLv2 servers
The data for `run_client_simulation()` currently includes two clients that send version 2.0 CLIENT-HELLO messages (see Appendix E.2 of RFC 5246). Each of the CLIENT-HELLO messages advertises support for newer protocol versions (SSLv3 in the case of IE6XP and TLSv1.0 in the case of Java 6u45). A server may reject one of these version 2.0 CLIENT-HELLO messages, or it may respond with an SSLv2, SSLv3, or TLSv1.0 ServerHello.
The current code in `client_simulation_sockets()` assumes that the server's response with be an SSLv3 or later ServerHello. So, it can support cases in which servers respond with an SSLv3 or TLSv1.0 ServerHello (once PR #800 is accepted to undo the mistake in PR #797), but not cases in which servers response with an SSLv2 ServerHello.
This PR adds code to `client_simulation_sockets()` to check if the server's response is an SSLv2 ServerHello, so that it can process such responses with `parse_sslv2_serverhello()` rather than `parse_tls_serverhello()`.
When a connection is made using SSLv3 or later, `run_client_simulation()` will show to the protocol and cipher selected for the connection. With this PR, if the connection is made using SSLv2, `run_client_simulation()` will just show "SSLv2." In the case of SSLv2, the ServerHello contains a list of all ciphers that the server and client have in common, and it is up to the client to choose one. So, if the client and server have more than one cipher in common, more information about the client would be needed to know which cipher it would choose.
2017-07-26 20:02:56 +02:00
else
parse_tls_serverhello " $tls_hello_ascii " "ephemeralkey" " $cipher_list_2send "
save = $?
2016-11-04 16:51:34 +01:00
client simulation and SSLv2 servers
The data for `run_client_simulation()` currently includes two clients that send version 2.0 CLIENT-HELLO messages (see Appendix E.2 of RFC 5246). Each of the CLIENT-HELLO messages advertises support for newer protocol versions (SSLv3 in the case of IE6XP and TLSv1.0 in the case of Java 6u45). A server may reject one of these version 2.0 CLIENT-HELLO messages, or it may respond with an SSLv2, SSLv3, or TLSv1.0 ServerHello.
The current code in `client_simulation_sockets()` assumes that the server's response with be an SSLv3 or later ServerHello. So, it can support cases in which servers respond with an SSLv3 or TLSv1.0 ServerHello (once PR #800 is accepted to undo the mistake in PR #797), but not cases in which servers response with an SSLv2 ServerHello.
This PR adds code to `client_simulation_sockets()` to check if the server's response is an SSLv2 ServerHello, so that it can process such responses with `parse_sslv2_serverhello()` rather than `parse_tls_serverhello()`.
When a connection is made using SSLv3 or later, `run_client_simulation()` will show to the protocol and cipher selected for the connection. With this PR, if the connection is made using SSLv2, `run_client_simulation()` will just show "SSLv2." In the case of SSLv2, the ServerHello contains a list of all ciphers that the server and client have in common, and it is up to the client to choose one. So, if the client and server have more than one cipher in common, more information about the client would be needed to know which cipher it would choose.
2017-07-26 20:02:56 +02:00
if [ [ $save -eq 0 ] ] ; then
debugme echo "sending close_notify..."
if [ [ " $DETECTED_TLS_VERSION " = = "0300" ] ] ; then
socksend ",x15, x03, x00, x00, x02, x02, x00" 0
else
socksend ",x15, x03, x01, x00, x02, x02, x00" 0
fi
fi
2016-06-17 22:33:00 +02:00
2017-07-27 15:26:03 +02:00
if [ [ $DEBUG -ge 2 ] ] ; then
# see https://secure.wand.net.nz/trac/libprotoident/wiki/SSL
lines = $( count_lines " $( hexdump -C " $SOCK_REPLY_FILE " 2>$ERRFILE ) " )
tm_out " ( $lines lines returned) "
fi
client simulation and SSLv2 servers
The data for `run_client_simulation()` currently includes two clients that send version 2.0 CLIENT-HELLO messages (see Appendix E.2 of RFC 5246). Each of the CLIENT-HELLO messages advertises support for newer protocol versions (SSLv3 in the case of IE6XP and TLSv1.0 in the case of Java 6u45). A server may reject one of these version 2.0 CLIENT-HELLO messages, or it may respond with an SSLv2, SSLv3, or TLSv1.0 ServerHello.
The current code in `client_simulation_sockets()` assumes that the server's response with be an SSLv3 or later ServerHello. So, it can support cases in which servers respond with an SSLv3 or TLSv1.0 ServerHello (once PR #800 is accepted to undo the mistake in PR #797), but not cases in which servers response with an SSLv2 ServerHello.
This PR adds code to `client_simulation_sockets()` to check if the server's response is an SSLv2 ServerHello, so that it can process such responses with `parse_sslv2_serverhello()` rather than `parse_tls_serverhello()`.
When a connection is made using SSLv3 or later, `run_client_simulation()` will show to the protocol and cipher selected for the connection. With this PR, if the connection is made using SSLv2, `run_client_simulation()` will just show "SSLv2." In the case of SSLv2, the ServerHello contains a list of all ciphers that the server and client have in common, and it is up to the client to choose one. So, if the client and server have more than one cipher in common, more information about the client would be needed to know which cipher it would choose.
2017-07-26 20:02:56 +02:00
# determine the return value for higher level, so that they can tell what the result is
if [ [ $save -eq 1 ] ] || [ [ $lines -eq 1 ] ] ; then
ret = 1 # NOT available
else
ret = 0
fi
debugme tmln_out
2016-06-17 22:33:00 +02:00
fi
close_socket
TMPFILE = $SOCK_REPLY_FILE
tmpfile_handle $FUNCNAME .dd
return $ret
}

run_client_simulation() {
     # Runs browser simulations. Browser capabilities gathered from:
     # https://www.ssllabs.com/ssltest/clients.html on 10 jan 2016
     local names=()
     local short=()
     local protos=()
     local ciphers=()
     local tlsvers=()
     local sni=()
     local warning=()
     local handshakebytes=()
     local lowest_protocol=()
     local highest_protocol=()
     local service=()
     local minDhBits=()
     local maxDhBits=()
     local minRsaBits=()
     local maxRsaBits=()
     local minEcdsaBits=()
     local requiresSha2=()
     local i=0
     local name tls proto cipher temp what_dh bits curve
     local has_dh_bits using_sockets=true
     local client_service
     local options

     # source the external file
     . "$TESTSSL_INSTALL_DIR/etc/client-simulation.txt" 2>/dev/null
     if [[ $? -ne 0 ]]; then
          prln_local_problem "couldn't find client simulation data in $TESTSSL_INSTALL_DIR/etc/client-simulation.txt"
          return 1
     fi

     "$SSL_NATIVE" && using_sockets=false

     if [[ $SERVICE != "" ]]; then
          client_service="$SERVICE"
     else
          # Can we take the service from STARTTLS?
          if [[ -n "$STARTTLS_PROTOCOL" ]]; then
               client_service=$(toupper "${STARTTLS_PROTOCOL%s}")      # strip trailing 's' in ftp(s), smtp(s), pop3(s), etc
          else
               outln "Could not determine the protocol, only simulating generic clients."
               client_service="undetermined"
          fi
     fi

     outln
     if "$using_sockets"; then
          pr_headlineln " Running client simulations via sockets "
     else
          pr_headline " Running client simulations via openssl "
          prln_warning " -- you shouldn't run this with \"--ssl-native\" as you will get false results"
          fileout "client_simulation_Problem" "WARN" "You shouldn't run this with \"--ssl-native\" as you will get false results"
     fi
     outln
     debugme echo

     if "$WIDE"; then
          if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]]; then
               out " Browser                      Protocol  Cipher Suite Name (OpenSSL)       "
               ( "$using_sockets" || "$HAS_DH_BITS" ) && out "Forward Secrecy"
               outln
               out "--------------------------------------------------------------------------"
          else
               out " Browser                      Protocol  Cipher Suite Name (RFC)                           "
               ( "$using_sockets" || "$HAS_DH_BITS" ) && out "Forward Secrecy"
               outln
               out "------------------------------------------------------------------------------------------"
          fi
          ( "$using_sockets" || "$HAS_DH_BITS" ) && out "----------------------"
          outln
     fi

     for name in "${short[@]}"; do
          if ${current[i]} || "$ALL_CLIENTS"; then
               # for ANY we test this service or if the service we determined from STARTTLS matches
               if [[ "${service[i]}" == "ANY" ]] || [[ "${service[i]}" =~ $client_service ]]; then
                    out " $(printf -- "%-29s" "${names[i]}")"
                    if "$using_sockets" && [[ -n "${handshakebytes[i]}" ]]; then
                         client_simulation_sockets "${handshakebytes[i]}"
                         sclient_success=$?
                         if [[ $sclient_success -eq 0 ]]; then
                              if [[ "0x${DETECTED_TLS_VERSION}" -lt ${lowest_protocol[i]} ]] || \
                                 [[ "0x${DETECTED_TLS_VERSION}" -gt ${highest_protocol[i]} ]]; then
                                   sclient_success=1
                              fi
                              [[ $sclient_success -eq 0 ]] && cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE >$ERRFILE
                         fi
                    else
                         options="$(s_client_options "-cipher ${ciphers[i]} ${protos[i]} $STARTTLS $BUGS $PROXY -connect $NODEIP:$PORT ${sni[i]}")"
                         debugme echo "$OPENSSL s_client $options  </dev/null"
                         $OPENSSL s_client $options </dev/null >$TMPFILE 2>$ERRFILE
                         sclient_connect_successful $? $TMPFILE
                         sclient_success=$?
                    fi
                    if [[ $sclient_success -eq 0 ]]; then
                         # If an ephemeral DH key was used, check that the number of bits is within range.
                         temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$TMPFILE")        # extract line
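                         # Illustration (assumes openssl's "Server Temp Key" output format, not from
                         # the original code): "Server Temp Key: ECDH, P-256, 256 bits" yields
                         # temp="ECDH, P-256, 256 bits", so below what_dh="ECDH" (up to the first
                         # comma), bits ends up as "256" (text after the last comma with "bits" and
                         # spaces stripped) and curve keeps the middle field ("P-256"). For
                         # "DH, 2048 bits" there is no middle field, in which case curve is emptied.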
                         what_dh="${temp%%,*}"
                         bits="${temp##*,}"
                         # formatting
                         curve="${temp#*,}"
                         if [[ "$curve" == "$bits" ]]; then
                              curve=""
                         else
                              curve="${curve%%,*}"
                         fi
                         bits="${bits/bits/}"
                         bits="${bits// /}"
                         if [[ "$what_dh" == "X25519" ]] || [[ "$what_dh" == "X448" ]]; then
                              curve="$what_dh"
                              what_dh="ECDH"
                         fi
                         if [[ "$what_dh" == "DH" ]]; then
                              [[ ${minDhBits[i]} -ne -1 ]] && [[ $bits -lt ${minDhBits[i]} ]] && sclient_success=1
                              [[ ${maxDhBits[i]} -ne -1 ]] && [[ $bits -gt ${maxDhBits[i]} ]] && sclient_success=1
                         fi
                    fi
                    if [[ $sclient_success -ne 0 ]]; then
                         outln "No connection"
                         fileout "client_${short[i]}" "INFO" "$(strip_spaces "${names[i]}") client simulation: No connection"
                    else
                         proto=$(get_protocol $TMPFILE)
                         # hack:
                         [[ "$proto" == TLSv1 ]] && proto="TLSv1.0"
                         [[ "$proto" == SSLv3 ]] && proto="SSLv3 "
                         if [[ "$proto" == TLSv1.2 ]] && ( ! "$using_sockets" || [[ -z "${handshakebytes[i]}" ]] ); then
                              # OpenSSL reports TLS1.2 even if the connection is TLS1.1 or TLS1.0. Need to figure out which one it is...
                              for tls in ${tlsvers[i]}; do
                                   options="$(s_client_options "$tls -cipher ${ciphers[i]} ${protos[i]} $STARTTLS $BUGS $PROXY -connect $NODEIP:$PORT ${sni[i]}")"
                                   debugme echo "$OPENSSL s_client $options  </dev/null"
                                   $OPENSSL s_client $options </dev/null >$TMPFILE 2>$ERRFILE
                                   sclient_connect_successful $? $TMPFILE
                                   sclient_success=$?
                                   if [[ $sclient_success -eq 0 ]]; then
                                        case "$tls" in
                                             "-tls1_2") break ;;
                                             "-tls1_1") proto="TLSv1.1"
                                                        break ;;
                                             "-tls1")   proto="TLSv1.0"
                                                        break ;;
                                        esac
                                   fi
                              done
                         fi
                         cipher=$(get_cipher $TMPFILE)
                         if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && ( [[ "$cipher" == TLS_* ]] || [[ "$cipher" == SSL_* ]] ); then
                              cipher="$(rfc2openssl "$cipher")"
                              [[ -z "$cipher" ]] && cipher=$(get_cipher $TMPFILE)
                         elif [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]] && [[ "$cipher" != TLS_* ]] && [[ "$cipher" != SSL_* ]]; then
                              cipher="$(openssl2rfc "$cipher")"
                              [[ -z "$cipher" ]] && cipher=$(get_cipher $TMPFILE)
                         fi
                         if ! "$WIDE"; then
                              out "$proto $cipher"
                         elif [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]]; then
                              out "$(printf -- "%-7s %-34s" "$proto" "$cipher")"
                         else
                              out "$(printf -- "%-7s %-50s" "$proto" "$cipher")"
                         fi
                         if ! "$WIDE"; then
                              "$using_sockets" && [[ -n "${handshakebytes[i]}" ]] && has_dh_bits=$HAS_DH_BITS && HAS_DH_BITS=true
                              "$HAS_DH_BITS" && read_dhbits_from_file $TMPFILE
                              "$using_sockets" && [[ -n "${handshakebytes[i]}" ]] && HAS_DH_BITS=$has_dh_bits
                         elif [[ -n "$what_dh" ]]; then
                              [[ -n "$curve" ]] && curve="($curve)"
                              if [[ "$what_dh" == "ECDH" ]]; then
                                   pr_ecdh_quality "$bits" "$(printf -- "%-12s" "$bits bit $what_dh") $curve"
                              else
                                   pr_dh_quality "$bits" "$(printf -- "%-12s" "$bits bit $what_dh") $curve"
                              fi
                         elif "$HAS_DH_BITS" || ( "$using_sockets" && [[ -n "${handshakebytes[i]}" ]] ); then
                              out "No FS"
                         fi
                         outln
                         if [[ -n "${warning[i]}" ]]; then
                              out "                              "
                              outln "${warning[i]}"
                         fi
                         fileout "client_${short[i]}" "INFO" \
                              "$(strip_spaces "${names[i]}") client simulation: $proto $cipher ${warning[i]}"
                         debugme cat $TMPFILE
                    fi
               fi   # correct service?
          fi   # current?
          i=$((i+1))
     done

     tmpfile_handle $FUNCNAME.txt
     return 0
}

# generic function to check whether $1 is supported by s_client ($2: string to display)
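# Illustrative call (a sketch, not from the original): callers typically do something like
#    locally_supported "-ssl3" "SSLv3  " || return 7
# i.e. print the label via $2 and bail out with 7 if the local openssl lacks the option.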
locally_supported() {
     [[ -n "$2" ]] && out "$2"
     if $OPENSSL s_client "$1" -connect x 2>&1 | grep -aq "unknown option"; then
          prln_local_problem "$OPENSSL doesn't support \"s_client $1\""
          return 7
     fi
     return 0
}

# the protocol check needs to be revamped. It sucks.
# 1) we need to have a variable where the results are being stored, so that every other test doesn't have to do this again.
# 2) the code is too old and one can do that way better
# 3) HAS_SSL3/2 does already exist
# we should do what's available and faster (openssl vs. sockets). Keep in mind that the socket reply for SSLv2 returns the number of ciphers!
#
# arg1: -ssl2|-ssl3|-tls1
# arg2: doesn't seem to be used by any caller; seems to be a text string naming the protocol though
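# (Illustration: every SSLv2 cipher spec is 3 bytes long, so a cipher-spec length of 21
#  in an SSLv2 ServerHello corresponds to 21/3 = 7 ciphers -- exactly what the SSLv2
#  branch of run_protocols() below derives from V2_HELLO_CIPHERSPEC_LENGTH.)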
run_prototest_openssl() {
     local -i ret=0

     $OPENSSL s_client $(s_client_options "-state $1 $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
     sclient_connect_successful $? $TMPFILE
     ret=$?
     debugme egrep "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
     if ! locally_supported "$1" "$2"; then
          ret=7
     else                                      # try again without $PROXY
          $OPENSSL s_client $(s_client_options "-state $1 $STARTTLS $BUGS -connect $NODEIP:$PORT $SNI") >$TMPFILE 2>$ERRFILE </dev/null
          sclient_connect_successful $? $TMPFILE
          ret=$?
          debugme egrep "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
          grep -aq "no cipher list" $TMPFILE && ret=5       # <--- important indicator for SSL2 (maybe others, too)
     fi
     tmpfile_handle $FUNCNAME$1.txt
     return $ret

     # 0: offered
     # 1: not offered
     # 5: protocol ok, but no cipher
     # 7: no local support
}
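
# Typical caller pattern (illustrative only -- run_protocols() below does exactly this):
#    run_prototest_openssl "-ssl3"
#    case $? in
#         0) ;;   # offered
#         1) ;;   # not offered
#         5) ;;   # protocol ok, but no cipher
#         7) ;;   # no local support
#    esac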

# idempotent function to add SSL/TLS protocols. It should accelerate testing.
# PROTOS_OFFERED can be e.g. "ssl2:no ssl3:no tls1_2:yes" which means that
# SSLv2 and SSLv3 were tested but are not available, TLS 1.2 was tested and is available,
# and TLS 1.0 / TLS 1.1 haven't been tested yet.
#
# arg1: protocol
# arg2: available (yes) or not (no)
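#
# Illustration (hypothetical sequence, derived from the code below): starting with an
# empty PROTOS_OFFERED,
#    add_tls_offered ssl3 no       # -> PROTOS_OFFERED=" ssl3:no"
#    add_tls_offered tls1_2 yes    # -> PROTOS_OFFERED=" ssl3:no tls1_2:yes"
#    add_tls_offered tls1_2 no     # no change, "tls1_2:" is already recorded (idempotent)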
add_tls_offered() {
     if [[ "$PROTOS_OFFERED" =~ $1: ]]; then
          # the ":" is mandatory here (and @ other places), otherwise e.g. tls1 will match tls1_2
          :
     else
          PROTOS_OFFERED+=" ${1}:$2"
     fi
}

# function which checks whether a protocol (SSLv2 - TLS 1.3) is being offered, see add_tls_offered()
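# Illustrative caller pattern (a sketch, not from the original): the echoed value is
# meant to be captured, e.g.
#    case "$(has_server_protocol "tls1_2")" in
#         0) ;;   # tested and offered
#         1) ;;   # tested, not offered
#         2) ;;   # not tested yet -- worth an extra connect
#    esac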
has_server_protocol() {
     local proto
     local proto_val_pair

     case "$1" in
          04) proto="tls1_3" ;;
          03) proto="tls1_2" ;;
          02) proto="tls1_1" ;;
          01) proto="tls1" ;;
          00) proto="ssl3" ;;
          *) proto="$1" ;;
     esac

     if [[ "$PROTOS_OFFERED" =~ $proto: ]]; then
          for proto_val_pair in $PROTOS_OFFERED; do
               if [[ $proto_val_pair =~ $proto: ]]; then
                    if [[ ${proto_val_pair#*:} == "yes" ]]; then
                         echo 0
                         return 0
                    else
                         echo 1
                         return 0
                    fi
               fi
          done
     else
          # if empty echo 2, hinting to the caller to check at additional cost/connect
          echo 2
          return 0
     fi
}

# the protocol check needs to be revamped. It sucks, see above
run_protocols() {
     local using_sockets=true
     local supported_no_ciph1="supported but couldn't detect a cipher (may need debugging)"
     local supported_no_ciph2="supported but couldn't detect a cipher"
     local latest_supported=""                 # version.major and version.minor of highest version supported by the server.
     local detected_version_string latest_supported_string
     local lines nr_ciphers_detected
     local tls13_ciphers_to_test=""
     local drafts_offered=""
     local -i ret

     outln; pr_headline " Testing protocols "

     if "$SSL_NATIVE"; then
          using_sockets=false
          prln_underline "via native openssl"
     else
          using_sockets=true
          if [[ -n "$STARTTLS" ]]; then
               prln_underline "via sockets "
          else
               prln_underline "via sockets except SPDY+HTTP2 "
          fi
     fi
     outln
pr_bold " SSLv2 " ;
2016-07-23 15:12:13 +02:00
if ! " $SSL_NATIVE " ; then
2016-08-11 20:16:33 +02:00
sslv2_sockets
case $? in
7) # strange reply, couldn't convert the cipher spec length to a hex number
pr_cyan "strange v2 reply "
outln " (rerun with DEBUG >=2)"
[ [ $DEBUG -ge 3 ] ] && hexdump -C " $TEMPDIR / $NODEIP .sslv2_sockets.dd " | head -1
fileout "sslv2" "WARN" "SSLv2: received a strange SSLv2 reply (rerun with DEBUG>=2)"
; ;
1) # no sslv2 server hello returned, like in openlitespeed which returns HTTP!
2017-02-25 16:31:30 +01:00
prln_done_best "not offered (OK)"
2016-11-17 23:27:27 +01:00
fileout "sslv2" "OK" "SSLv2 is not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl2 no
2016-08-11 20:16:33 +02:00
; ;
0) # reset
2017-02-25 16:31:30 +01:00
prln_done_best "not offered (OK)"
2016-11-17 23:27:27 +01:00
fileout "sslv2" "OK" "SSLv2 is not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl2 no
2016-08-11 20:16:33 +02:00
; ;
3) # everything else
lines = $( count_lines " $( hexdump -C " $TEMPDIR / $NODEIP .sslv2_sockets.dd " 2>/dev/null) " )
2017-02-25 16:31:30 +01:00
[ [ " $DEBUG " -ge 2 ] ] && tm_out " ( $lines lines) "
2016-08-11 20:16:33 +02:00
if [ [ " $lines " -gt 1 ] ] ; then
nr_ciphers_detected = $(( V2_HELLO_CIPHERSPEC_LENGTH / 3 ))
2017-10-02 13:48:55 +02:00
add_tls_offered ssl2 yes
2016-08-11 20:16:33 +02:00
if [ [ 0 -eq " $nr_ciphers_detected " ] ] ; then
2017-02-25 16:31:30 +01:00
prln_svrty_high "supported but couldn't detect a cipher and vulnerable to CVE-2015-3197 " ;
2016-11-17 23:27:27 +01:00
fileout "sslv2" "HIGH" "SSLv2 is offered, vulnerable to CVE-2015-3197"
2016-08-11 20:16:33 +02:00
else
pr_svrty_critical "offered (NOT ok), also VULNERABLE to DROWN attack" ;
outln " -- $nr_ciphers_detected ciphers "
2016-11-17 23:27:27 +01:00
fileout "sslv2" "CRITICAL" " SSLv2 offered, vulnerable to DROWN attack. Detected ciphers: $nr_ciphers_detected "
2016-08-11 20:16:33 +02:00
fi
fi ; ;
esac
2017-02-25 16:31:30 +01:00
debugme tmln_out
2015-09-17 15:30:15 +02:00
else
run_prototest_openssl "-ssl2"
case $? in
2017-03-18 22:24:35 +01:00
0) prln_svrty_critical "offered (NOT ok)"
2016-11-17 23:27:27 +01:00
fileout "sslv2" "CRITICAL" "SSLv2 is offered"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl2 yes
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
1) prln_done_best "not offered (OK)"
2016-11-17 23:27:27 +01:00
fileout "sslv2" "OK" "SSLv2 is not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl2 no
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
5) pr_svrty_high " CVE-2015-3197: $supported_no_ciph2 " ;
2016-10-28 15:30:07 +02:00
fileout "sslv2" "HIGH" " CVE-2015-3197: SSLv2 is $supported_no_ciph2 "
2017-10-02 13:48:55 +02:00
add_tls_offered ssl2 yes
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
7) fileout "sslv2" "INFO" "SSLv2 is not tested due to lack of local support"
2017-09-01 16:13:32 +02:00
; ; # no local support
2015-09-17 15:30:15 +02:00
esac
fi
pr_bold " SSLv3 " ;
2016-07-23 15:12:13 +02:00
if " $using_sockets " ; then
2015-09-17 15:30:15 +02:00
tls_sockets "00" " $TLS_CIPHER "
else
run_prototest_openssl "-ssl3"
fi
case $? in
2017-03-18 22:24:35 +01:00
0) prln_svrty_high "offered (NOT ok)"
2016-11-17 23:27:27 +01:00
fileout "sslv3" "HIGH" "SSLv3 is offered"
2016-05-06 21:12:53 +02:00
latest_supported = "0300"
latest_supported_string = "SSLv3"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl3 yes
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
1) prln_done_best "not offered (OK)"
2016-11-17 23:27:27 +01:00
fileout "sslv3" "OK" "SSLv3 is not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl3 no
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
2) if [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] ; then
2016-05-06 21:12:53 +02:00
detected_version_string = " TLSv1. $(( 0 x$DETECTED_TLS_VERSION - 0 x0301)) "
2017-02-25 16:31:30 +01:00
prln_svrty_critical " server responded with higher version number ( $detected_version_string ) than requested by client (NOT ok) "
2016-11-17 23:27:27 +01:00
fileout "sslv3" "CRITICAL" " SSLv3: server responded with higher version number ( $detected_version_string ) than requested by client "
2016-05-06 21:12:53 +02:00
else
2017-09-01 16:13:32 +02:00
if [ [ ${# DETECTED_TLS_VERSION } -eq 4 ] ] ; then
prln_svrty_critical " server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } (NOT ok) "
fileout "sslv3" "CRITICAL" " SSLv3: server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } "
else
prln_svrty_medium " strange, server ${ DETECTED_TLS_VERSION } "
fileout "sslv3" "MEDIUM" " SSLv3: strange, server ${ DETECTED_TLS_VERSION } "
fi
2016-05-06 21:12:53 +02:00
fi
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
5) pr_svrty_high " $supported_no_ciph2 "
2016-10-28 15:30:07 +02:00
fileout "sslv3" "HIGH" " SSLv3 is $supported_no_ciph1 "
2016-01-23 19:18:33 +01:00
outln "(may need debugging)"
2017-10-02 13:48:55 +02:00
add_tls_offered ssl3 yes
2016-10-28 15:30:07 +02:00
; ;
2017-08-28 18:25:45 +02:00
7) prln_warning "SSLv3 seems locally not supported"
fileout "sslv3" "WARN" "SSLv3 is not tested due to lack of local support"
2017-09-01 16:13:32 +02:00
; ; # no local support
2015-09-17 15:30:15 +02:00
esac
pr_bold " TLS 1 " ;
2016-07-23 15:12:13 +02:00
if " $using_sockets " ; then
2015-09-17 15:30:15 +02:00
tls_sockets "01" " $TLS_CIPHER "
else
run_prototest_openssl "-tls1"
fi
case $? in
2017-03-18 22:24:35 +01:00
0) outln "offered"
2016-01-23 23:33:17 +01:00
fileout "tls1" "INFO" "TLSv1.0 is offered"
2016-05-06 21:12:53 +02:00
latest_supported = "0301"
latest_supported_string = "TLSv1.0"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1 yes
2016-06-09 11:04:40 +02:00
; ; # nothing wrong with it -- per se
2017-03-18 22:24:35 +01:00
1) out "not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1 no
2016-07-23 15:12:13 +02:00
if ! " $using_sockets " || [ [ -z $latest_supported ] ] ; then
2016-05-06 21:12:53 +02:00
outln
fileout "tls1" "INFO" "TLSv1.0 is not offered" # neither good or bad
else
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string (NOT ok) "
2016-11-17 23:27:27 +01:00
fileout "tls1" "CRITICAL" " TLSv1.0: connection failed rather than downgrading to $latest_supported_string "
2016-05-06 21:12:53 +02:00
fi
; ;
2017-03-18 22:24:35 +01:00
2) pr_svrty_medium "not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1 no
2016-05-06 21:12:53 +02:00
if [ [ " $DETECTED_TLS_VERSION " = = "0300" ] ] ; then
2017-07-26 22:37:50 +02:00
[ [ $DEBUG -ge 1 ] ] && tm_out " -- downgraded"
2016-05-06 21:12:53 +02:00
outln
2016-05-31 15:51:13 +02:00
fileout "tls1" "MEDIUM" "TLSv1.0 is not offered, and downgraded to SSL"
2016-05-06 21:12:53 +02:00
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] ; then
detected_version_string = " TLSv1. $(( 0 x$DETECTED_TLS_VERSION - 0 x0301)) "
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- server responded with higher version number ( $detected_version_string ) than requested by client "
2016-11-17 23:27:27 +01:00
fileout "tls1" "CRITICAL" " TLSv1.0: server responded with higher version number ( $detected_version_string ) than requested by client "
2016-05-06 21:12:53 +02:00
else
2017-09-01 16:13:32 +02:00
if [ [ ${# DETECTED_TLS_VERSION } -eq 4 ] ] ; then
prln_svrty_critical " server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } (NOT ok) "
fileout "tls1" "CRITICAL" " TLSv1.0: server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } "
else
prln_svrty_medium " -- strange, server ${ DETECTED_TLS_VERSION } "
fileout "tls1" "MEDIUM" " TLSv1.0: server ${ DETECTED_TLS_VERSION } "
fi
2016-05-06 21:12:53 +02:00
fi
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
5) outln " $supported_no_ciph1 " # protocol ok, but no cipher
2017-08-28 18:25:45 +02:00
fileout "tls1" "INFO" " TLSv1.0 is $supported_no_ciph1 "
2017-10-02 13:48:55 +02:00
add_tls_offered tls1 yes
2016-01-23 19:18:33 +01:00
; ;
2017-08-28 18:25:45 +02:00
7) prln_warning "TLSv1.0 seems locally not supported"
fileout "tlsv1" "WARN" "TLSv1.0 is not tested due to lack of local support"
; ; # no local support
2015-09-17 15:30:15 +02:00
esac
pr_bold " TLS 1.1 " ;
2016-07-23 15:12:13 +02:00
if " $using_sockets " ; then
2015-09-17 15:30:15 +02:00
tls_sockets "02" " $TLS_CIPHER "
else
run_prototest_openssl "-tls1_1"
2016-01-23 19:18:33 +01:00
fi
2015-09-17 15:30:15 +02:00
case $? in
2017-03-18 22:24:35 +01:00
0) outln "offered"
2016-01-23 23:33:17 +01:00
fileout "tls1_1" "INFO" "TLSv1.1 is offered"
2016-05-06 21:12:53 +02:00
latest_supported = "0302"
latest_supported_string = "TLSv1.1"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_1 yes
2016-01-23 19:18:33 +01:00
; ; # nothing wrong with it
2017-03-18 22:24:35 +01:00
1) out "not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_1 no
2016-07-23 15:12:13 +02:00
if ! " $using_sockets " || [ [ -z $latest_supported ] ] ; then
2016-05-06 21:12:53 +02:00
outln
fileout "tls1_1" "INFO" "TLSv1.1 is not offered" # neither good or bad
else
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string "
2016-11-17 23:27:27 +01:00
fileout "tls1_1" "CRITICAL" " TLSv1.1: connection failed rather than downgrading to $latest_supported_string "
2016-05-06 21:12:53 +02:00
fi
; ;
2017-03-18 22:24:35 +01:00
2) out "not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_1 no
2016-05-06 21:12:53 +02:00
if [ [ " $DETECTED_TLS_VERSION " = = " $latest_supported " ] ] ; then
2017-07-26 22:37:50 +02:00
[ [ $DEBUG -ge 1 ] ] && tm_out " -- downgraded"
2016-05-06 21:12:53 +02:00
outln
2016-11-17 23:27:27 +01:00
fileout "tls1_1" "CRITICAL" "TLSv1.1 is not offered, and downgraded to a weaker protocol"
2016-05-06 21:12:53 +02:00
elif [ [ " $DETECTED_TLS_VERSION " = = "0300" ] ] && [ [ " $latest_supported " = = "0301" ] ] ; then
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- server supports TLSv1.0, but downgraded to SSLv3 (NOT ok)"
2016-11-17 23:27:27 +01:00
fileout "tls1_1" "CRITICAL" "TLSv1.1 is not offered, and downgraded to SSLv3 rather than TLSv1.0"
2016-05-06 21:12:53 +02:00
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] && [ [ 0x$DETECTED_TLS_VERSION -gt 0x0302 ] ] ; then
detected_version_string = " TLSv1. $(( 0 x$DETECTED_TLS_VERSION - 0 x0301)) "
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- server responded with higher version number ( $detected_version_string ) than requested by client (NOT ok) "
2016-11-17 23:27:27 +01:00
fileout "tls1_1" "CRITICAL" " TLSv1.1 is not offered, server responded with higher version number ( $detected_version_string ) than requested by client "
2016-05-06 21:12:53 +02:00
else
2017-09-01 16:13:32 +02:00
if [ [ ${# DETECTED_TLS_VERSION } -eq 4 ] ] ; then
prln_svrty_critical " server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } (NOT ok) "
fileout "tls1_1" "CRITICAL" " TLSv1.1: server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } "
else
prln_svrty_medium " -- strange, server ${ DETECTED_TLS_VERSION } "
fileout "tls1_1" "MEDIUM" " TLSv1.1: server ${ DETECTED_TLS_VERSION } "
fi
2016-05-06 21:12:53 +02:00
fi
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
5) outln " $supported_no_ciph1 "
2017-08-28 18:25:45 +02:00
fileout "tls1_1" "INFO" " TLSv1.1 is $supported_no_ciph1 "
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_1 yes
2017-08-28 18:25:45 +02:00
; ; # protocol ok, but no cipher
7) prln_warning "TLSv1.1 seems locally not supported"
fileout "tls1_1" "WARN" "TLSv1.1 is not tested due to lack of local support"
; ; # no local support
2015-09-17 15:30:15 +02:00
esac
pr_bold " TLS 1.2 " ;
2017-02-07 23:08:29 +01:00
if " $using_sockets " ; then
2015-09-17 15:30:15 +02:00
tls_sockets "03" " $TLS12_CIPHER "
2017-07-28 18:07:29 +02:00
ret = $?
if [ [ $ret -ne 0 ] ] ; then
tls_sockets "03" " $TLS12_CIPHER_2ND_TRY "
[ [ $? -eq 0 ] ] && ret = 0
2017-07-31 12:59:36 +02:00
# see #807 and #806
2017-07-28 18:07:29 +02:00
fi
2015-09-17 15:30:15 +02:00
else
run_prototest_openssl "-tls1_2"
2017-07-28 18:07:29 +02:00
ret = $?
2016-01-23 19:18:33 +01:00
fi
2017-07-28 18:07:29 +02:00
case $ret in
2017-03-18 22:24:35 +01:00
0) prln_done_best "offered (OK)"
2016-11-17 23:27:27 +01:00
fileout "tls1_2" "OK" "TLSv1.2 is offered"
2016-05-06 21:12:53 +02:00
latest_supported = "0303"
latest_supported_string = "TLSv1.2"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_2 yes
2016-01-23 19:18:33 +01:00
; ; # GCM cipher in TLS 1.2: very good!
2017-03-18 22:24:35 +01:00
1) pr_svrty_medium "not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_2 no
2017-02-14 22:43:46 +01:00
if ! " $using_sockets " || [ [ -z $latest_supported ] ] ; then
2017-02-15 17:47:11 +01:00
outln
2016-05-31 15:51:13 +02:00
fileout "tls1_2" "MEDIUM" "TLSv1.2 is not offered" # no GCM, penalty
2016-05-06 21:12:53 +02:00
else
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string "
2016-12-27 20:14:20 +01:00
fileout "tls1_2" "CRITICAL" " TLSv1.2: connection failed rather than downgrading to $latest_supported_string "
2016-05-06 21:12:53 +02:00
fi
; ;
2017-03-18 22:24:35 +01:00
2) pr_svrty_medium "not offered"
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_2 no
2016-05-06 21:12:53 +02:00
if [ [ " $DETECTED_TLS_VERSION " = = "0300" ] ] ; then
detected_version_string = "SSLv3"
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] ; then
detected_version_string = " TLSv1. $(( 0 x$DETECTED_TLS_VERSION - 0 x0301)) "
fi
if [ [ " $DETECTED_TLS_VERSION " = = " $latest_supported " ] ] ; then
2017-07-26 22:37:50 +02:00
[ [ $DEBUG -ge 1 ] ] && tm_out " -- downgraded"
2016-05-06 21:12:53 +02:00
outln
2016-05-31 15:51:13 +02:00
fileout "tls1_2" "MEDIUM" "TLSv1.2 is not offered and downgraded to a weaker protocol"
2016-05-06 21:12:53 +02:00
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] && [ [ 0x$DETECTED_TLS_VERSION -lt 0x$latest_supported ] ] ; then
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- server supports $latest_supported_string , but downgraded to $detected_version_string "
2016-11-17 23:27:27 +01:00
fileout "tls1_2" "CRITICAL" " TLSv1.2 is not offered, and downgraded to $detected_version_string rather than $latest_supported_string "
2016-05-06 21:12:53 +02:00
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] && [ [ 0x$DETECTED_TLS_VERSION -gt 0x0303 ] ] ; then
2017-02-25 16:31:30 +01:00
prln_svrty_critical " -- server responded with higher version number ( $detected_version_string ) than requested by client "
2016-11-17 23:27:27 +01:00
fileout "tls1_2" "CRITICAL" " TLSv1.2 is not offered, server responded with higher version number ( $detected_version_string ) than requested by client "
2016-05-06 21:12:53 +02:00
else
2017-09-01 16:13:32 +02:00
if [ [ ${# DETECTED_TLS_VERSION } -eq 4 ] ] ; then
prln_svrty_critical " server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } (NOT ok) "
fileout "tls1_2" "CRITICAL" " TLSv1.2: server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } "
else
prln_svrty_medium " -- strange, server ${ DETECTED_TLS_VERSION } "
fileout "tls1_2" "MEDIUM" " TLSv1.2: server ${ DETECTED_TLS_VERSION } "
fi
2016-05-06 21:12:53 +02:00
fi
2016-01-23 19:18:33 +01:00
; ;
2017-03-18 22:24:35 +01:00
5) outln " $supported_no_ciph1 "
2017-08-28 18:25:45 +02:00
fileout "tls1_2" "INFO" " TLSv1.2 is $supported_no_ciph1 "
2017-10-02 13:48:55 +02:00
add_tls_offered tls1_2 yes
2016-01-23 19:18:33 +01:00
; ; # protocol ok, but no cipher
2017-08-28 18:25:45 +02:00
7) prln_warning "TLSv1.2 seems locally not supported"
fileout "tls1_2" "WARN" "TLSv1.2 is not tested due to lack of local support"
; ; # no local support
2015-09-17 15:30:15 +02:00
esac
2017-11-02 22:13:54 +01:00
pr_bold " TLS 1.3 " ;
if " $using_sockets " ; then
# Need to ensure that at most 128 ciphers are included in ClientHello.
# If the TLSv1.2 test was successful, then use the 5 TLSv1.3 ciphers
# plus the cipher selected in the TLSv1.2 test. If the TLSv1.2 test was
# not successful, then just use the 5 TLSv1.3 ciphers plus the list of
# ciphers used in all of the previous tests ($TLS_CIPHER).
          if [[ $ret -eq 0 ]] || [[ $ret -eq 2 ]]; then
               tls13_ciphers_to_test="$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")"
               if [[ "$tls13_ciphers_to_test" == TLS_* ]] || [[ "$tls13_ciphers_to_test" == SSL_* ]]; then
                    tls13_ciphers_to_test="$(rfc2hexcode "$tls13_ciphers_to_test")"
               else
                    tls13_ciphers_to_test="$(openssl2hexcode "$tls13_ciphers_to_test")"
               fi
          fi
          if [[ ${#tls13_ciphers_to_test} -eq 9 ]]; then
               tls13_ciphers_to_test="$TLS13_CIPHER, ${tls13_ciphers_to_test:2:2},${tls13_ciphers_to_test:7:2}"
          else
               tls13_ciphers_to_test="$TLS13_CIPHER,$TLS_CIPHER"
          fi
          tls_sockets "04" "$tls13_ciphers_to_test"
     else
          run_prototest_openssl "-tls1_3"
     fi
     case $? in
          0)   if ! "$using_sockets"; then
                    outln "offered (OK)"
                    fileout "tls1_3" "OK" "TLSv1.3 is offered"
               else
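                    # The probes below hand tls_sockets() a pre-built supported_versions extension:
                    #   "00, 2b" = extension type 43 (supported_versions), "00, 03" = extension length,
                    #   "02" = version list length, then one 2-byte version:
                    #   0x7f,0x12 .. 0x7f,0x15 = draft 18..21, 0x03,0x04 = final TLS 1.3.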
tls_sockets "04" " $TLS13_CIPHER " "" "00, 2b, 00, 03, 02, 7f, 12"
2017-11-05 20:30:18 +01:00
[ [ $? -eq 0 ] ] && drafts_offered = "draft 18"
2017-11-02 22:13:54 +01:00
tls_sockets "04" " $TLS13_CIPHER " "" "00, 2b, 00, 03, 02, 7f, 13"
if [ [ $? -eq 0 ] ] ; then
[ [ -n " $drafts_offered " ] ] && drafts_offered += ", "
2017-11-05 20:30:18 +01:00
drafts_offered += "draft 19"
2017-11-02 22:13:54 +01:00
fi
tls_sockets "04" " $TLS13_CIPHER " "" "00, 2b, 00, 03, 02, 7f, 14"
if [ [ $? -eq 0 ] ] ; then
[ [ -n " $drafts_offered " ] ] && drafts_offered += ", "
2017-11-05 20:30:18 +01:00
drafts_offered += "draft 20"
2017-11-02 22:13:54 +01:00
fi
tls_sockets "04" " $TLS13_CIPHER " "" "00, 2b, 00, 03, 02, 7f, 15"
if [ [ $? -eq 0 ] ] ; then
[ [ -n " $drafts_offered " ] ] && drafts_offered += ", "
2017-11-05 20:30:18 +01:00
drafts_offered += "draft 21"
2017-11-02 22:13:54 +01:00
fi
tls_sockets "04" " $TLS13_CIPHER " "" "00, 2b, 00, 03, 02, 03, 04"
if [ [ $? -eq 0 ] ] ; then
[ [ -n " $drafts_offered " ] ] && drafts_offered += ", "
2017-11-05 20:30:18 +01:00
drafts_offered += "final"
2017-11-02 22:13:54 +01:00
fi
if [ [ -n " $drafts_offered " ] ] ; then
2017-11-05 20:30:18 +01:00
pr_done_best "offered (OK)" ; outln " : $drafts_offered "
fileout "tls1_3" "OK" " TLSv1.3 offered: $drafts_offered "
2017-11-02 22:13:54 +01:00
else
prln_warning "Unexpected results"
fileout "tls1_3" "WARN" "TLSv1.3 unexpected results"
fi
fi
latest_supported = "0304"
latest_supported_string = "TLSv1.3"
add_tls_offered tls1_3 yes
; ;
1) out "not offered"
if ! " $using_sockets " || [ [ -z $latest_supported ] ] ; then
outln
fileout "tls1_3" "INFO" "TLSv1.3 is not offered"
else
prln_svrty_critical " -- connection failed rather than downgrading to $latest_supported_string "
fileout "tls1_3" "CRITICAL" " TLSv1.3: connection failed rather than downgrading to $latest_supported_string "
fi
add_tls_offered tls1_3 no
; ;
2) out "not offered"
if [ [ " $DETECTED_TLS_VERSION " = = "0300" ] ] ; then
detected_version_string = "SSLv3"
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] ; then
detected_version_string = " TLSv1. $(( 0 x$DETECTED_TLS_VERSION - 0 x0301)) "
fi
if [ [ " $DETECTED_TLS_VERSION " = = " $latest_supported " ] ] ; then
[ [ $DEBUG -eq 1 ] ] && out " -- downgraded"
outln
fileout "tls1_3" "INFO" "TLSv1.3 is not offered and downgraded to a weaker protocol"
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] && [ [ 0x$DETECTED_TLS_VERSION -lt 0x$latest_supported ] ] ; then
prln_svrty_critical " -- server supports $latest_supported_string , but downgraded to $detected_version_string "
fileout "tls1_3" "CRITICAL" " TLSv1.3 is not offered, and downgraded to $detected_version_string rather than $latest_supported_string "
elif [ [ " $DETECTED_TLS_VERSION " = = 03* ] ] && [ [ 0x$DETECTED_TLS_VERSION -gt 0x0304 ] ] ; then
prln_svrty_critical " -- server responded with higher version number ( $detected_version_string ) than requested by client "
fileout "tls1_3" "CRITICAL" " TLSv1.3 is not offered, server responded with higher version number ( $detected_version_string ) than requested by client "
else
prln_svrty_critical " -- server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } "
fileout "tls1_3" "CRITICAL" " TLSv1.3: server responded with version number ${ DETECTED_TLS_VERSION : 0 : 2 } . ${ DETECTED_TLS_VERSION : 2 : 2 } "
fi
add_tls_offered tls1_3 no
; ;
5) outln " $supported_no_ciph1 "
fileout "tls1_3" "INFO" " TLSv1.3 is $supported_no_ciph1 "
add_tls_offered tls1_3 yes
; ; # protocol ok, but no cipher
7) prln_warning "TLSv1.3 seems locally not supported"
fileout "tls1_3" "INFO" "TLSv1.3 is not tested due to lack of local support"
; ; # no local support
esac

     debugme echo "PROTOS_OFFERED: $PROTOS_OFFERED"
     if [[ -z "$PROTOS_OFFERED" ]]; then
          outln
          ignore_no_or_lame "You should not proceed as no protocol was detected. If you still really really want to, say \"YES\"" "YES"
          [[ $? -ne 0 ]] && exit -2
     fi

     return 0
}

#TODO: work with fixed lists here --> atm ok, as sockets are preferred. If there were a single function for testing: yes.

run_std_cipherlists() {
     local hexc hexcode strength
     local using_sockets=true
     local -i i
     local null_ciphers="c0,10, c0,06, c0,15, c0,0b, c0,01, c0,3b, c0,3a, c0,39, 00,b9, 00,b8, 00,b5, 00,b4, 00,2e, 00,2d, 00,b1, 00,b0, 00,2c, 00,3b, 00,02, 00,01, 00,82, 00,83, ff,87, 00,ff"
     local sslv2_null_ciphers=""
     local anon_ciphers="c0,19, 00,a7, 00,6d, 00,3a, 00,c5, 00,89, c0,47, c0,5b, c0,85, c0,18, 00,a6, 00,6c, 00,34, 00,bf, 00,9b, 00,46, c0,46, c0,5a, c0,84, c0,16, 00,18, c0,17, 00,1b, 00,1a, 00,19, 00,17, c0,15, 00,ff"
     local sslv2_anon_ciphers=""
     local adh_ciphers="00,a7, 00,6d, 00,3a, 00,c5, 00,89, c0,47, c0,5b, c0,85, 00,a6, 00,6c, 00,34, 00,bf, 00,9b, 00,46, c0,46, c0,5a, c0,84, 00,18, 00,1b, 00,1a, 00,19, 00,17, 00,ff"
     local sslv2_adh_ciphers=""
     # ~ grep -i EXP etc/cipher-mapping.txt
     local exp_ciphers="00,63, 00,62, 00,61, 00,65, 00,64, 00,60, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e, 00,17, 00,03, 00,28, 00,2b, 00,ff"
     local sslv2_exp_ciphers="04,00,80, 02,00,80"
     # ~ egrep -w '64|56' etc/cipher-mapping.txt | grep -v export
     local low_ciphers="00,15, 00,12, 00,0f, 00,0c, 00,09, 00,1e, 00,22, fe,fe, ff,e1, 00,ff"
     local sslv2_low_ciphers="08,00,80, 06,00,40"
     # ~ egrep -w 128 etc/cipher-mapping.txt | egrep -v "Au=None|AEAD|ARIA|Camellia|AES"
     local medium_ciphers="00,9a, 00,99, 00,98, 00,97, 00,96, 00,07, 00,21, 00,25, c0,11, c0,07, 00,66, c0,0c, c0,02, 00,05, 00,04, 00,92, 00,8a, 00,20, 00,24, c0,33, 00,8e, 00,ff"
     local sslv2_medium_ciphers="01,00,80, 03,00,80, 05,00,80"
     # ~ egrep -w '3DES' etc/cipher-mapping.txt
     local tdes_ciphers="c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, fe,ff, ff,e0, 00,ff"
     local sslv2_tdes_ciphers="07,00,c0"
     # ~ equivalent to 'egrep -w "GOST|128|256" etc/cipher-mapping.txt | grep -v '=None' | egrep -vw 'RC4|AEAD|IDEA|SEED|RC2'. Attention: 127 ciphers currently
     local high_ciphers="c0,28, c0,24, c0,14, c0,0a, c0,22, c0,21, c0,20, 00,b7, 00,b3, 00,91, c0,9b, c0,99, c0,97, 00,af, c0,95, 00,6b, 00,6a, 00,69, 00,68, 00,39, 00,38, 00,37, 00,36, c0,77, c0,73, 00,c4, 00,c3, 00,c2, 00,c1, 00,88, 00,87, 00,86, 00,85, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, 00,3d, 00,35, 00,c0, c0,38, c0,36, 00,84, 00,95, 00,8d, c0,3d, c0,3f, c0,41, c0,43, c0,45, c0,49, c0,4b, c0,4d, c0,4f, c0,65, c0,67, c0,69, c0,71, 00,80, 00,81, ff,00, ff,01, ff,02, ff,03, ff,85, c0,27, c0,23, c0,13, c0,09, c0,1f, c0,1e, c0,1d, 00,67, 00,40, 00,3f, 00,3e, 00,33, 00,32, 00,31, 00,30, c0,76, c0,72, 00,be, 00,bd, 00,bc, 00,bb, 00,45, 00,44, 00,43, 00,42, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, 00,3c, 00,2f, 00,ba, c0,37, c0,35, 00,b6, 00,b2, 00,90, 00,41, c0,9a, c0,98, c0,96, 00,ae, c0,94, 00,94, 00,8c, c0,3c, c0,3e, c0,40, c0,42, c0,44, c0,48, c0,4a, c0,4c, c0,4e, c0,64, c0,66, c0,68, c0,70"
     # no SSLv2 here and in strong
     # ~ equivalent to 'grep AEAD etc/cipher-mapping.txt | grep -v Au=None'
     local strong_ciphers="13,01, 13,02, 13,03, 13,04, 13,05, cc,14, cc,13, cc,15, c0,30, c0,2c, 00,a5, 00,a3, 00,a1, 00,9f, cc,a9, cc,a8, cc,aa, c0,af, c0,ad, c0,a3, c0,9f, 00,ad, 00,ab, cc,ae, cc,ad, cc,ac, c0,ab, c0,a7, c0,32, c0,2e, 00,9d, c0,a1, c0,9d, 00,a9, cc,ab, c0,a9, c0,a5, c0,51, c0,53, c0,55, c0,57, c0,59, c0,5d, c0,5f, c0,61, c0,63, c0,6b, c0,6d, c0,6f, c0,7b, c0,7d, c0,7f, c0,81, c0,83, c0,87, c0,89, c0,8b, c0,8d, c0,8f, c0,91, c0,93, 16,b7, 16,b8, 16,b9, 16,ba, c0,2f, c0,2b, 00,a4, 00,a2, 00,a0, 00,9e, c0,ae, c0,ac, c0,a2, c0,9e, 00,ac, 00,aa, c0,aa, c0,a6, c0,a0, c0,9c, 00,a8, c0,a8, c0,a4, c0,31, c0,2d, 00,9c, c0,50, c0,52, c0,54, c0,56, c0,58, c0,5c, c0,5e, c0,60, c0,62, c0,6a, c0,6c, c0,6e, c0,7a, c0,7c, c0,7e, c0,80, c0,82, c0,86, c0,88, c0,8a, c0,8c, c0,8e, c0,90, c0,92, 00,ff"
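     # For reference: each TLS entry above is a 2-byte cipher suite ID with its hex bytes comma-separated,
     # e.g. "13,01" = 0x1301 = TLS_AES_128_GCM_SHA256 and "c0,30" = 0xC030 = TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
     # while SSLv2 entries carry the 3-byte SSLv2 cipher kind, e.g. "07,00,c0" = SSL_CK_DES_192_EDE3_CBC_WITH_MD5.
     # The grep/egrep filters noted above sketch how each list relates to etc/cipher-mapping.txt; e.g.
     #      egrep -w 3DES etc/cipher-mapping.txt
     # lists the 3DES candidates (illustrative only -- the output still has to be reformatted by hand into the "xx,yy" notation).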

     "$SSL_NATIVE" && using_sockets=false
     if ! "$using_sockets"; then
          null_ciphers=""; anon_ciphers=""
          exp_ciphers=""; low_ciphers=""; medium_ciphers=""
          tdes_ciphers=""; high_ciphers=""; strong_ciphers=""
          sslv2_null_ciphers=""; sslv2_anon_ciphers=""
          sslv2_exp_ciphers=""; sslv2_low_ciphers=""
          sslv2_medium_ciphers=""; sslv2_tdes_ciphers=""
     fi

     outln
     pr_headlineln " Testing ~standard cipher categories "
     outln
     # argv[1]: cipher list to test in OpenSSL syntax (see ciphers(1ssl) or run 'openssl ciphers -v/-V')
     # argv[2]: string on console / HTML or "finding"
     # argv[3]: rating whether ok to offer
     # argv[4]: string to be appended for fileout
     # argv[5]: non-SSLv2 cipher list to test (hexcodes), if using sockets
     # argv[6]: SSLv2 cipher list to test (hexcodes), if using sockets
     std_cipherlists 'NULL:eNULL'                 " NULL ciphers (no encryption) "                -2 "NULL"      "$null_ciphers"   "$sslv2_null_ciphers"
     std_cipherlists 'aNULL:ADH'                  " Anonymous NULL Ciphers (no authentication)"   -2 "aNULL"     "$anon_ciphers"   "$sslv2_anon_ciphers"
     std_cipherlists 'EXPORT:!ADH:!NULL'          " Export ciphers (w/o ADH+NULL) "               -2 "EXPORT"    "$exp_ciphers"    "$sslv2_exp_ciphers"
     std_cipherlists 'LOW:DES:!ADH:!EXP:!NULL'    " LOW: 64 Bit + DES encryption (w/o export) "   -2 "DES+64Bit" "$low_ciphers"    "$sslv2_low_ciphers"
     std_cipherlists 'MEDIUM:!aNULL:!AES:!CAMELLIA:!ARIA:!CHACHA20:!3DES' \
                                                  " Weak 128 Bit ciphers (SEED, IDEA, RC[2,4])"   -1 "128Bit"    "$medium_ciphers" "$sslv2_medium_ciphers"
     std_cipherlists '3DES:!aNULL:!ADH'           " Triple DES Ciphers (Medium) "                  0 "3DES"      "$tdes_ciphers"   "$sslv2_tdes_ciphers"
     std_cipherlists 'HIGH:!NULL:!aNULL:!DES:!3DES:!AESGCM:!CHACHA20:!AESGCM:!CamelliaGCM:!AESCCM8:!AESCCM' \
                                                  " High encryption (AES+Camellia, no AEAD) "      1 "HIGH"      "$high_ciphers"   ""
     std_cipherlists 'AESGCM:CHACHA20:AESGCM:CamelliaGCM:AESCCM8:AESCCM' \
                                                  " Strong encryption (AEAD ciphers) "             2 "STRONG"    "$strong_ciphers" ""
     outln
     return 0
}

pr_dh_quality() {
     local bits="$1"
     local string="$2"

     if [[ "$bits" -le 600 ]]; then
          pr_svrty_critical "$string"
     elif [[ "$bits" -le 800 ]]; then
          pr_svrty_high "$string"
     elif [[ "$bits" -le 1280 ]]; then
          pr_svrty_medium "$string"
     elif [[ "$bits" -ge 2048 ]]; then
          pr_done_good "$string"
     else
          out "$string"
     fi
}
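
# Examples (derived from the thresholds above): "pr_dh_quality 1024 '1024 bit DH'" prints via
# pr_svrty_medium (<= 1280 bits), 2048 bits and above prints via pr_done_good, and the 1281-2047
# range falls through to a plain out() -- neither flagged nor praised.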

pr_ecdh_quality() {
     local bits="$1"
     local string="$2"

     if [[ "$bits" -le 80 ]]; then       # has that ever existed?
          pr_svrty_critical "$string"
     elif [[ "$bits" -le 108 ]]; then    # has that ever existed?
          pr_svrty_high "$string"
     elif [[ "$bits" -le 163 ]]; then
          pr_svrty_medium "$string"
     elif [[ "$bits" -le 193 ]]; then    # hmm, according to https://wiki.openssl.org/index.php/Elliptic_Curve_Cryptography it should be ok
          pr_svrty_low "$string"         # but openssl removed it https://github.com/drwetter/testssl.sh/issues/299#issuecomment-220905416
     elif [[ "$bits" -le 224 ]]; then
          out "$string"
     elif [[ "$bits" -gt 224 ]]; then
          pr_done_good "$string"
     else
          out "$string"
     fi
}

pr_ecdh_curve_quality() {
     curve="$1"
     local -i bits=0

     case "$curve" in
          "sect163k1") bits=163 ;;
          "sect163r1") bits=162 ;;
          "sect163r2") bits=163 ;;
          "sect193r1") bits=193 ;;
          "sect193r2") bits=193 ;;
          "sect233k1") bits=232 ;;
          "sect233r1") bits=233 ;;
          "sect239k1") bits=238 ;;
          "sect283k1") bits=281 ;;
          "sect283r1") bits=282 ;;
          "sect409k1") bits=407 ;;
          "sect409r1") bits=409 ;;
          "sect571k1") bits=570 ;;
          "sect571r1") bits=570 ;;
          "secp160k1") bits=161 ;;
          "secp160r1") bits=161 ;;
          "secp160r2") bits=161 ;;
          "secp192k1") bits=192 ;;
          "prime192v1") bits=192 ;;
          "secp224k1") bits=225 ;;
          "secp224r1") bits=224 ;;
          "secp256k1") bits=256 ;;
          "prime256v1") bits=256 ;;
          "secp384r1") bits=384 ;;
          "secp521r1") bits=521 ;;
          "brainpoolP256r1") bits=256 ;;
          "brainpoolP384r1") bits=384 ;;
          "brainpoolP512r1") bits=512 ;;
          "X25519") bits=253 ;;
          "X448") bits=448 ;;
     esac
     pr_ecdh_quality "$bits" "$curve"
}
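
# Example: "pr_ecdh_curve_quality prime256v1" maps the curve to 256 bits and thus prints via
# pr_done_good (> 224 bits in pr_ecdh_quality); an unknown curve keeps bits=0 and is printed
# via pr_svrty_critical (<= 80).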

# Print $2 based on the quality of the cipher in $1. If $2 is empty, print $1.
# The return value is an indicator of the quality of the cipher in $1:
#   0 = $1 is empty
#   1 = pr_svrty_critical, 2 = pr_svrty_high, 3 = pr_svrty_medium, 4 = pr_svrty_low
#   5 = neither good nor bad, 6 = pr_done_good, 7 = pr_done_best
pr_cipher_quality() {
     local cipher="$1"
     local text="$2"

     [[ -z "$1" ]] && return 0
     [[ -z "$text" ]] && text="$cipher"

     if [[ "$cipher" != TLS_* ]] && [[ "$cipher" != SSL_* ]]; then
          # This must be the OpenSSL name for a cipher
          if [[ $TLS_NR_CIPHERS -eq 0 ]]; then
               # We have the OpenSSL name and can't convert it to the RFC name
               case "$cipher" in
                    *NULL*|*EXP*)
                         pr_svrty_critical "$text"
                         return 1
                         ;;
                    *RC4*)
                         pr_svrty_high "$text"
                         return 2
                         ;;
                    *CBC*)
                         pr_svrty_medium "$text"
                         return 3
                         ;;   # FIXME BEAST: We miss some CBC ciphers here, need to work w/ a list
                    *GCM*|*CHACHA20*)
                         pr_done_best "$text"
                         return 7
                         ;;   # best ones
                    ECDHE*AES*)
                         pr_svrty_low "$text"
                         return 4
                         ;;   # it's CBC. --> lucky13
                    *)
                         out "$text"
                         return 5
                         ;;
               esac
          fi
          cipher="$(openssl2rfc "$cipher")"
     fi

     case "$cipher" in
          *NULL*|*EXP*|*RC2*|*_DES_*|*_DES40_*)
               pr_svrty_critical "$text"
               return 1
               ;;
          *RC4*)
               pr_svrty_high "$text"
               return 2
               ;;
          *ECDHE*AES*CBC*)
               pr_svrty_low "$text"
               return 4
               ;;
          *CBC*)
               pr_svrty_medium "$text"
               return 3
               ;;
          *GCM*|*CHACHA20*)
               pr_done_best "$text"
               return 7
               ;;
          *)
               out "$text"
               return 5
               ;;
     esac
}
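
# Examples: pr_cipher_quality "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384" prints via pr_done_best and
# returns 7; pr_cipher_quality "TLS_RSA_WITH_RC4_128_SHA" prints via pr_svrty_high and returns 2.
# Callers typically branch on the return code, as run_server_preference() below does for the default cipher.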

# arg1: file with input for grepping the type of ephemeral DH key (DH ECDH)
read_dhtype_from_file() {
     local temp kx

     temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$1")        # extract line
     kx="Kx=${temp%%,*}"
     [[ "$kx" == "Kx=X25519" ]] && kx="Kx=ECDH"
     [[ "$kx" == "Kx=X448" ]] && kx="Kx=ECDH"
     tm_out "$kx"
     return 0
}
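
# Example: with an s_client log containing "Server Temp Key: X25519, 253 bits" this prints "Kx=ECDH"
# (X25519/X448 are folded into ECDH); a log with "Server Temp Key: DH, 2048 bits" prints "Kx=DH".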

# arg1: certificate file
read_sigalg_from_file() {
     $OPENSSL x509 -noout -text -in "$1" 2>/dev/null | awk -F':' '/Signature Algorithm/ { print $2; exit; }'
}
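
# Example: for a certificate signed with RSA/SHA-256 this typically prints "sha256WithRSAEncryption",
# i.e. whatever OpenSSL shows after "Signature Algorithm:" in its text output.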

# arg1: file with input for grepping the bit length for ECDH/DHE
# arg2: whether to print warning "old fart" or not (empty: no)
read_dhbits_from_file() {
     local bits what_dh temp curve=""
     local add=""
     local old_fart=" (your $OPENSSL cannot show DH bits)"

     temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$1")        # extract line
     what_dh="${temp%%,*}"
     bits="${temp##*,}"
     curve="${temp#*,}"
     if [[ "$curve" == "$bits" ]]; then
          curve=""
     else
          curve="${curve%%,*}"
     fi
     bits="${bits/bits/}"
     bits="${bits// /}"
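     # Illustrative walk-through of the splitting above (line format as printed by openssl s_client):
     #   temp="ECDH, P-256, 256 bits"  -->  what_dh="ECDH", curve="P-256" (modulo whitespace), bits="256"
     #   temp="DH, 2048 bits"          -->  what_dh="DH",   curve=""  (middle and last field coincide), bits="2048"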

     if [[ "$what_dh" == "X25519" ]] || [[ "$what_dh" == "X448" ]]; then
          curve="$what_dh"
          what_dh="ECDH"
     fi
     if [[ -z "$2" ]]; then
          if [[ -n "$curve" ]]; then
               debugme echo ">$HAS_DH_BITS|$what_dh($curve)|$bits<"
          else
               debugme echo ">$HAS_DH_BITS|$what_dh|$bits<"
          fi
     fi
     [[ -n "$what_dh" ]] && HAS_DH_BITS=true               # FIX 190
     if [[ -z "$what_dh" ]] && ! "$HAS_DH_BITS"; then
          if [[ "$2" == "string" ]]; then
               tm_out "$old_fart"
          elif [[ -z "$2" ]]; then
               pr_warning "$old_fart"
          fi
          return 0
     fi
     if [[ "$2" == "quiet" ]]; then
          tm_out "$bits"
          return 0
     fi
     [[ -z "$2" ]] && [[ -n "$bits" ]] && out ", "
     if [[ $what_dh == "DH" ]] || [[ $what_dh == "EDH" ]]; then
          add="bit DH"
          [[ -n "$curve" ]] && add+=" ($curve)"
          if [[ "$2" == "string" ]]; then
               tm_out ", $bits $add"
          else
               pr_dh_quality "$bits" "$bits $add"
          fi
     # https://wiki.openssl.org/index.php/Elliptic_Curve_Cryptography, http://www.keylength.com/en/compare/
     elif [[ $what_dh == "ECDH" ]]; then
          add="bit ECDH"
          [[ -n "$curve" ]] && add+=" ($curve)"
          if [[ "$2" == "string" ]]; then
               tm_out ", $bits $add"
          else
               pr_ecdh_quality "$bits" "$bits $add"
          fi
     fi
     return 0
}

# arg1: ID or empty. If empty, resumption by ticket will be tested
# return: 0: it has resumption, 1: nope, 2: can't tell (e.g. client auth), 7: unclear result
sub_session_resumption() {
     local ret ret1 ret2
     local tmpfile=$(mktemp $TEMPDIR/session_resumption.$NODEIP.XXXXXX)
     local sess_data=$(mktemp $TEMPDIR/sub_session_data_resumption.$NODEIP.XXXXXX)
     local -a rw_line

     if [[ "$1" == ID ]]; then
          local byID=true
          local addcmd="-no_ticket"
     else
          local byID=false
          local addcmd=""
     fi
     "$CLIENT_AUTH" && return 2
     "$HAS_NO_SSL2" && addcmd+=" -no_ssl2" || addcmd+=" $OPTIMAL_PROTO"

     $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI $addcmd -sess_out $sess_data") </dev/null &>/dev/null
     ret1=$?
     $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI $addcmd -sess_in $sess_data") </dev/null >$tmpfile 2>$ERRFILE
     ret2=$?
     debugme echo "$ret1, $ret2, [[ -s "$sess_data" ]]"

     # Now get the handshake line and compare the numbers "read" and "written" as a second criterion:
     # if the second connection reads considerably more than it writes, the server performed a full
     # handshake again (certificate etc. was sent), i.e. no resumption took place.
     rw_line="$(awk '/^SSL handshake has read/ { print $5" "$(NF-1) }' "$tmpfile")"
     rw_line=($rw_line)
     if [[ "${rw_line[0]}" -gt "${rw_line[1]}" ]]; then
          new_sid2=true
     else
          new_sid2=false
     fi
     debugme echo "${rw_line[0]}, ${rw_line[1]}"
     #  grep -aq "^New" "$tmpfile" && new_sid=true || new_sid=false
     grep -aq "^Reused" "$tmpfile" && new_sid=false || new_sid=true
     if "$new_sid2" && "$new_sid"; then
          debugme echo -n "No session resumption "
          ret=1
     elif ! "$new_sid2" && ! "$new_sid"; then
          debugme echo -n "Session resumption "
          ret=0
     else
          debugme echo -n "unclear status: $ret1, $ret2, $new_sid, $new_sid2 -- "
          ret=7
     fi
     if [[ $DEBUG -ge 2 ]]; then
          "$byID" && echo "byID" || echo "by ticket"
     fi
     "$byID" && \
          tmpfile_handle $FUNCNAME.byID.log $tmpfile || \
          tmpfile_handle $FUNCNAME.byticket.log $tmpfile
     return $ret
}

run_server_preference() {
     local cipher1 cipher2 prev_cipher=""
     local default_cipher default_cipher_ossl default_proto
     local limitedsense supported_sslv2_ciphers
     local -a cipher proto
     local proto_ossl proto_txt proto_hex cipherlist i
     local -i ret=0 j
     local list_fwd="DES-CBC3-SHA:RC4-MD5:DES-CBC-SHA:RC4-SHA:AES128-SHA:AES128-SHA256:AES256-SHA:ECDHE-RSA-AES128-SHA:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-RSA-AES128-SHA:ECDH-RSA-AES256-SHA:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:DHE-DSS-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:AES256-SHA256"
     # now reversed offline via tac, see https://github.com/thomassa/testssl.sh/commit/7a4106e839b8c3033259d66697893765fc468393 :
     local list_reverse="AES256-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDH-RSA-AES256-SHA:ECDH-RSA-AES128-SHA:ECDH-RSA-DES-CBC3-SHA:DHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA:ECDHE-RSA-AES128-SHA:AES256-SHA:AES128-SHA256:AES128-SHA:RC4-SHA:DES-CBC-SHA:RC4-MD5:DES-CBC3-SHA"
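     # Explanatory note on the cipher-order check below: the same cipher list is offered twice,
     # once as $list_fwd and once reversed as $list_reverse. A server that enforces its own
     # preference picks the same cipher both times; a server that simply honors the client's
     # order returns the first acceptable cipher of each list, so the two ServerHello ciphers differ.
     #   e.g.  forward list  -> server picks ECDHE-RSA-AES128-GCM-SHA256
     #         reverse list  -> server picks ECDHE-RSA-AES128-GCM-SHA256 again  => "yes (OK)" (hypothetical example)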
     local has_cipher_order=false
     local addcmd="" addcmd2=""
     local using_sockets=true

     "$SSL_NATIVE" && using_sockets=false

     outln
     pr_headlineln " Testing server preferences "
     outln

     pr_bold " Has server cipher order? "
     if [[ "$OPTIMAL_PROTO" == "-ssl2" ]]; then
          addcmd="$OPTIMAL_PROTO"
     else
          # the supplied openssl will send an SSLv2 ClientHello if $SNI is empty
          # and -no_ssl2 isn't provided.
          addcmd="-no_ssl2 $SNI"
     fi
     $OPENSSL s_client $(s_client_options "$STARTTLS -cipher $list_fwd $BUGS -connect $NODEIP:$PORT $PROXY $addcmd") </dev/null 2>$ERRFILE >$TMPFILE
     if ! sclient_connect_successful $? $TMPFILE && [[ -z "$STARTTLS_PROTOCOL" ]]; then
          pr_warning "no matching cipher in this list found (pls report this): "
          outln "$list_fwd . "
          fileout "order_bug" "WARN" "Could not determine server cipher order, no matching cipher in this list found (pls report this): $list_fwd"
          tmpfile_handle $FUNCNAME.txt
          return 6
     elif [[ -n "$STARTTLS_PROTOCOL" ]]; then
          # now it still could be that we hit this bug: https://github.com/drwetter/testssl.sh/issues/188
          # workaround is to connect with a protocol
          debugme tm_out "(workaround #188) "
          determine_optimal_proto $STARTTLS_PROTOCOL
          $OPENSSL s_client $(s_client_options "$STARTTLS $STARTTLS_OPTIMAL_PROTO -cipher $list_fwd $BUGS -connect $NODEIP:$PORT $PROXY $addcmd2") </dev/null 2>$ERRFILE >$TMPFILE
          if ! sclient_connect_successful $? $TMPFILE; then
               pr_warning "no matching cipher in this list found (pls report this): "
               outln "$list_fwd . "
               fileout "order_bug" "WARN" "Could not determine server cipher order, no matching cipher in this list found (pls report this): $list_fwd"
               tmpfile_handle $FUNCNAME.txt
               return 6
          fi
     fi

     cipher1=$(get_cipher $TMPFILE)               # cipher1 from 1st serverhello
     if [[ -n "$STARTTLS_OPTIMAL_PROTO" ]]; then
          addcmd2="$STARTTLS_OPTIMAL_PROTO $SNI"
     else
          if [[ "$OPTIMAL_PROTO" == "-ssl2" ]]; then
               addcmd2="$OPTIMAL_PROTO"
          else
               addcmd2="-no_ssl2 $SNI"
          fi
     fi

     # second client hello with reverse list
     $OPENSSL s_client $(s_client_options "$STARTTLS -cipher $list_reverse $BUGS -connect $NODEIP:$PORT $PROXY $addcmd2") </dev/null 2>>$ERRFILE >$TMPFILE
     # first handshake worked above so no error handling here
     cipher2=$(get_cipher $TMPFILE)               # cipher2 from 2nd serverhello

     if [[ "$cipher1" != "$cipher2" ]]; then
          # server used the different ends (ciphers) from the client hello
          pr_svrty_high "nope (NOT ok)"
          limitedsense=" (limited sense as client will pick)"
          fileout "order" "HIGH" "Server does NOT set a cipher order"
     else
          pr_done_best "yes (OK)"
          has_cipher_order=true
          limitedsense=""
          fileout "order" "OK" "Server sets a cipher order"
     fi
     debugme tm_out " $cipher1 | $cipher2"
     outln

     pr_bold " Negotiated protocol "
     $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $addcmd") </dev/null 2>>$ERRFILE >$TMPFILE
     if ! sclient_connect_successful $? $TMPFILE; then
          # 2nd try with $OPTIMAL_PROTO, especially for intolerant IIS6 servers:
          $OPENSSL s_client $(s_client_options "$STARTTLS $OPTIMAL_PROTO $BUGS -connect $NODEIP:$PORT $PROXY $SNI") </dev/null 2>>$ERRFILE >$TMPFILE
          sclient_connect_successful $? $TMPFILE || pr_warning "Handshake error!"
     fi
     default_proto=$(get_protocol $TMPFILE)
     case "$default_proto" in
          *TLSv1.3)
               prln_done_best $default_proto
               fileout "order_proto" "OK" "Default protocol TLS1.3"
               ;;
          *TLSv1.2)
               prln_done_best $default_proto
               fileout "order_proto" "OK" "Default protocol TLS1.2"
               ;;
          *TLSv1.1)
               prln_done_good $default_proto
               fileout "order_proto" "OK" "Default protocol TLS1.1"
               ;;
          *TLSv1)
               outln $default_proto
               fileout "order_proto" "INFO" "Default protocol TLS1.0"
               ;;
          *SSLv2)
               prln_svrty_critical $default_proto
               fileout "order_proto" "CRITICAL" "Default protocol SSLv2"
               ;;
          *SSLv3)
               prln_svrty_critical $default_proto
               fileout "order_proto" "CRITICAL" "Default protocol SSLv3"
               ;;
          "")
               pr_warning "default proto empty"
               if [[ $OSSL_VER == 1.0.2* ]]; then
                    outln " (Hint: if IIS6 give OpenSSL 1.0.1 a try)"
                    fileout "order_proto" "WARN" "Default protocol empty (Hint: if IIS6 give OpenSSL 1.0.1 a try)"
               else
                    fileout "order_proto" "WARN" "Default protocol empty"
               fi
               ;;
          *)
               pr_warning "FIXME line $LINENO: $default_proto"
               fileout "order_proto" "WARN" "FIXME line $LINENO: $default_proto"
               ;;
     esac
pr_bold " Negotiated cipher "
default_cipher_ossl = $( get_cipher $TMPFILE )
if [ [ " $DISPLAY_CIPHERNAMES " = ~ openssl ] ] ; then
default_cipher = " $default_cipher_ossl "
else
default_cipher = " $( openssl2rfc " $default_cipher_ossl " ) "
[ [ -z " $default_cipher " ] ] && default_cipher = " $default_cipher_ossl "
fi
pr_cipher_quality " $default_cipher "
case $? in
1) fileout "order_cipher" "CRITICAL" " Default cipher: $default_cipher $( read_dhbits_from_file " $TMPFILE " "string" ) $limitedsense "
; ;
2) fileout "order_cipher" "HIGH" " Default cipher: $default_cipher $( read_dhbits_from_file " $TMPFILE " "string" ) $limitedsense "
; ;
3) fileout "order_cipher" "MEDIUM" " Default cipher: $default_cipher $( read_dhbits_from_file " $TMPFILE " "string" ) $limitedsense "
; ;
6| 7) fileout "order_cipher" "OK" " Default cipher: $default_cipher $( read_dhbits_from_file " $TMPFILE " "string" ) $limitedsense "
; ; # best ones
4) fileout "order_cipher" "LOW" " Default cipher: $default_cipher $( read_dhbits_from_file " $TMPFILE " "string" ) (cbc) $limitedsense "
; ; # it's CBC. --> lucky13
0) pr_warning "default cipher empty" ;
if [ [ $OSSL_VER = = 1.0.2* ] ] ; then
out " (Hint: if IIS6 give OpenSSL 1.0.1 a try)"
fileout "order_cipher" "WARN" " Default cipher empty (Hint: if IIS6 give OpenSSL 1.0.1 a try) $limitedsense "
else
fileout "order_cipher" "WARN" " Default cipher empty $limitedsense "
fi
; ;
*) fileout "order_cipher" "INFO" " Default cipher: $default_cipher $( read_dhbits_from_file " $TMPFILE " "string" ) $limitedsense "
; ;
esac
read_dhbits_from_file " $TMPFILE "
outln " $limitedsense "
if " $has_cipher_order " ; then
cipher_pref_check
else
pr_bold " Negotiated cipher per proto" ; outln " $limitedsense "
i = 1
          for proto_ossl in ssl2 ssl3 tls1 tls1_1 tls1_2 tls1_3; do
               if [[ $proto_ossl == ssl2 ]] && ! "$HAS_SSL2"; then
                    if ! "$using_sockets" || [[ $TLS_NR_CIPHERS -eq 0 ]]; then
                         out " (SSLv2: "; pr_local_problem "$OPENSSL doesn't support \"s_client -ssl2\""; outln ")"
                         continue
                    else
                         sslv2_sockets "" "true"
                         if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
                              # Just arbitrarily pick the first cipher in the cipher-mapping.txt list.
                              proto[i]="SSLv2"
                              supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
                              for (( j=0; j < TLS_NR_CIPHERS; j++ )); do
                                   if [[ "${TLS_CIPHER_SSLVERS[j]}" == "SSLv2" ]]; then
                                        cipher1="${TLS_CIPHER_HEXCODE[j]}"
                                        cipher1="$(tolower "x${cipher1:2:2}${cipher1:7:2}${cipher1:12:2}")"
                                        if [[ "$supported_sslv2_ciphers" =~ "$cipher1" ]]; then
                                             if ( [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ "${TLS_CIPHER_OSSL_NAME[j]}" != "-" ]] ) || [[ "${TLS_CIPHER_RFC_NAME[j]}" == "-" ]]; then
                                                  cipher[i]="${TLS_CIPHER_OSSL_NAME[j]}"
                                             else
                                                  cipher[i]="${TLS_CIPHER_RFC_NAME[j]}"
                                             fi
                                             break
                                        fi
                                   fi
                              done
                              [[ $DEBUG -ge 2 ]] && tmln_out "Default cipher for ${proto[i]}: ${cipher[i]}"
                         else
                              proto[i]=""
                              cipher[i]=""
                         fi
                    fi
               elif ( [[ $proto_ossl == ssl3 ]] && ! "$HAS_SSL3" ) || ( [[ $proto_ossl == tls1_3 ]] && ! "$HAS_TLS13" ); then
                    if [[ $proto_ossl == ssl3 ]]; then
                         proto_txt="SSLv3"; proto_hex="00"; cipherlist="$TLS_CIPHER"
                    else
                         proto_txt="TLSv1.3"; proto_hex="04"; cipherlist="$TLS13_CIPHER"
                    fi
                    if ! "$using_sockets"; then
                         out " ($proto_txt: "; pr_local_problem "$OPENSSL doesn't support \"s_client -$proto_ossl\""; outln ")"
                         continue
                    else
                         tls_sockets "$proto_hex" "$cipherlist"
                         if [[ $? -eq 0 ]]; then
                              proto[i]="$proto_txt"
                              cipher1=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
                              cipher[i]="$cipher1"
                              if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ $TLS_NR_CIPHERS -ne 0 ]]; then
                                   cipher[i]="$(rfc2openssl "$cipher1")"
                                   [[ -z "${cipher[i]}" ]] && cipher[i]="$cipher1"
                              fi
                              [[ $DEBUG -ge 2 ]] && tmln_out "Default cipher for ${proto[i]}: ${cipher[i]}"
                         else
                              proto[i]=""
                              cipher[i]=""
                         fi
                    fi
               else
                    $OPENSSL s_client $(s_client_options "$STARTTLS -$proto_ossl $BUGS -connect $NODEIP:$PORT $PROXY $SNI") </dev/null 2>>$ERRFILE >$TMPFILE
                    if sclient_connect_successful $? $TMPFILE; then
                         proto[i]=$(get_protocol $TMPFILE)
                         cipher[i]=$(get_cipher $TMPFILE)
                         [[ ${cipher[i]} == "0000" ]] && cipher[i]=""          # Hack!
                         if [[ "$DISPLAY_CIPHERNAMES" =~ rfc ]] && [[ -n "${cipher[i]}" ]]; then
                              cipher[i]="$(openssl2rfc "${cipher[i]}")"
                              [[ -z "${cipher[i]}" ]] && cipher[i]=$(get_cipher $TMPFILE)
                         fi
                         [[ $DEBUG -ge 2 ]] && tmln_out "Default cipher for ${proto[i]}: ${cipher[i]}"
                    else
                         proto[i]=""
                         cipher[i]=""
                    fi
               fi
               [[ -n "${cipher[i]}" ]] && add_tls_offered "$proto_ossl" yes
               i=$((i + 1))
          done

          [[ -n "$STARTTLS" ]] && arg=" "

          for i in 1 2 3 4 5 6; do
               if [[ -n "${cipher[i]}" ]]; then                                # cipher not empty
                    if [[ -z "$prev_cipher" ]] || [[ "$prev_cipher" != "${cipher[i]}" ]]; then
                         [[ -n "$prev_cipher" ]] && outln
                         if [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]]; then
                              out "$(printf -- " %-30s %s" "${cipher[i]}:" "${proto[i]}")"     # print out both
                         else
                              out "$(printf -- " %-51s %s" "${cipher[i]}:" "${proto[i]}")"     # print out both
                         fi
                    else
                         out ", ${proto[i]}"                                   # same cipher --> only print out protocol behind it
                    fi
                    prev_cipher="${cipher[i]}"
               fi
               fileout "order_${proto[i]}_cipher" "INFO" "Default cipher on ${proto[i]}: ${cipher[i]} $limitedsense"
          done

          outln "\n No further cipher order check has been done as order is determined by the client"
          outln
     fi
     return $ret
}

check_tls12_pref() {
     local batchremoved="-CAMELLIA:-IDEA:-KRB5:-PSK:-SRP:-aNULL:-eNULL"
     local batchremoved_success=false
     local tested_cipher=""
     local order=""
     local -i nr_ciphers_found_r1=0 nr_ciphers_found_r2=0

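     # Explanatory note on how the TLSv1.2 order is determined below:
     #   1st while loop: repeatedly connect with "ALL:<already-found ciphers removed>:$batchremoved";
     #                   each ServerHello reveals the server's next-preferred cipher, appended to $order.
     #   2nd while loop: the groups excluded above (CAMELLIA, IDEA, KRB5, PSK, SRP, aNULL, eNULL) are
     #                   offered on their own; any hit sets $batchremoved_success.
     #   3rd while loop (only if needed): both result sets are merged into $combined_ciphers and ordered again.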
while true; do
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS -tls1_2 $BUGS -cipher " ALL$tested_cipher :$batchremoved " -connect $NODEIP : $PORT $PROXY $SNI " ) </dev/null 2>>$ERRFILE >$TMPFILE
2016-06-07 13:02:58 +02:00
if sclient_connect_successful $? $TMPFILE ; then
2017-04-13 16:32:19 +02:00
cipher = $( get_cipher $TMPFILE )
2016-06-07 13:02:58 +02:00
order += " $cipher "
tested_cipher = " $tested_cipher :- $cipher "
Use sockets to determine cipher order
This PR modifies `cipher_pref_check()` to use `tls_sockets()`. As with similar PRs for `run_allciphers()`, `run_cipher_per_proto()`, and `run_rc4()`, it also makes use of `$OPENSSL s_client`, since `$OPENSSL s_client` is faster than `tls_sockets()`.
With this PR, `cipher_pref_check()` first uses `$OPENSSL s_client` to obtain an ordered list of ciphers. It then makes one call to `tls_sockets()` (or a few calls if proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`) to find if the server supports any ciphers that are not detected by `$OPENSSL s_client`. If not, then it is done. If it finds one, then it throws out the previous results and starts over with `tls_sockets()`. [If proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`, then it doesn't throw out the `$OPENSSL s_client` results. Instead, it continues with `tls_sockets()` to get the full list of supported ciphers, and then uses `tls_sockets()` to order that list.]
The result is that this PR works almost as fast as the current `cipher_pref_check()` if `$OPENSSL s_client` finds all of the supported ciphers, at the cost of a performance penalty when testing servers that support ciphers that would have otherwise been missed using just OpenSSL.
Note that in this PR I removed SSLv2 from the list of protocols tested. This is because https://community.qualys.com/thread/16255 states that "in SSLv2 the client selects the suite to use." It seems that in SSLv2, the client sends a list of ciphers that it supports, the server responds with a list of ciphers that the client and server have in common, and then "the client selects the suite to use." So, showing a cipher order for SSLv2 is a bit misleading.
As noted in #543, this PR does not modify the second part of `cipher_pref_check()`, which deals with NPN protocols.
2016-12-01 22:51:38 +01:00
nr_ciphers_found_r1 += 1
" $FAST " && break
2016-06-07 13:02:58 +02:00
else
2017-02-25 16:31:30 +01:00
debugme tmln_out " A: $tested_cipher "
2016-06-07 13:02:58 +02:00
break
fi
done
batchremoved = " ${ batchremoved //-/ } "
while true; do
Use sockets to determine cipher order
This PR modifies `cipher_pref_check()` to use `tls_sockets()`. As with similar PRs for `run_allciphers()`, `run_cipher_per_proto()`, and `run_rc4()`, it also makes use of `$OPENSSL s_client`, since `$OPENSSL s_client` is faster than `tls_sockets()`.
With this PR, `cipher_pref_check()` first uses `$OPENSSL s_client` to obtain an ordered list of ciphers. It then makes one call to `tls_sockets()` (or a few calls if proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`) to find if the server supports any ciphers that are not detected by `$OPENSSL s_client`. If not, then it is done. If it finds one, then it throws out the previous results and starts over with `tls_sockets()`. [If proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`, then it doesn't throw out the `$OPENSSL s_client` results. Instead, it continues with `tls_sockets()` to get the full list of supported ciphers, and then uses `tls_sockets()` to order that list.]
The result is that this PR works almost as fast as the current `cipher_pref_check()` if `$OPENSSL s_client` finds all of the supported ciphers, at the cost of a performance penalty when testing servers that support ciphers that would have otherwise been missed using just OpenSSL.
Note that in this PR I removed SSLv2 from the list of protocols tested. This is because https://community.qualys.com/thread/16255 states that "in SSLv2 the client selects the suite to use." It seems that in SSLv2, the client sends a list of ciphers that it supports, the server responds with a list of ciphers that the client and server have in common, and then "the client selects the suite to use." So, showing a cipher order for SSLv2 is a bit misleading.
As noted in #543, this PR does not modify the second part of `cipher_pref_check()`, which deals with NPN protocols.
2016-12-01 22:51:38 +01:00
# no ciphers from "ALL$tested_cipher:$batchremoved" left
2016-06-07 13:02:58 +02:00
# now we check $batchremoved, and remove the minus signs first:
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS -tls1_2 $BUGS -cipher " $batchremoved " -connect $NODEIP : $PORT $PROXY $SNI " ) </dev/null 2>>$ERRFILE >$TMPFILE
2016-06-07 13:02:58 +02:00
if sclient_connect_successful $? $TMPFILE ; then
batchremoved_success = true # signals that we have some of those ciphers and need to put everything together later on
2017-04-13 16:32:19 +02:00
cipher = $( get_cipher $TMPFILE )
2016-06-07 13:02:58 +02:00
order += " $cipher "
batchremoved = " $batchremoved :- $cipher "
Use sockets to determine cipher order
This PR modifies `cipher_pref_check()` to use `tls_sockets()`. As with similar PRs for `run_allciphers()`, `run_cipher_per_proto()`, and `run_rc4()`, it also makes use of `$OPENSSL s_client`, since `$OPENSSL s_client` is faster than `tls_sockets()`.
With this PR, `cipher_pref_check()` first uses `$OPENSSL s_client` to obtain an ordered list of ciphers. It then makes one call to `tls_sockets()` (or a few calls if proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`) to find if the server supports any ciphers that are not detected by `$OPENSSL s_client`. If not, then it is done. If it finds one, then it throws out the previous results and starts over with `tls_sockets()`. [If proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`, then it doesn't throw out the `$OPENSSL s_client` results. Instead, it continues with `tls_sockets()` to get the full list of supported ciphers, and then uses `tls_sockets()` to order that list.]
The result is that this PR works almost as fast as the current `cipher_pref_check()` if `$OPENSSL s_client` finds all of the supported ciphers, at the cost of a performance penalty when testing servers that support ciphers that would have otherwise been missed using just OpenSSL.
Note that in this PR I removed SSLv2 from the list of protocols tested. This is because https://community.qualys.com/thread/16255 states that "in SSLv2 the client selects the suite to use." It seems that in SSLv2, the client sends a list of ciphers that it supports, the server responds with a list of ciphers that the client and server have in common, and then "the client selects the suite to use." So, showing a cipher order for SSLv2 is a bit misleading.
As noted in #543, this PR does not modify the second part of `cipher_pref_check()`, which deals with NPN protocols.
2016-12-01 22:51:38 +01:00
nr_ciphers_found_r1 += 1
2017-02-25 16:31:30 +01:00
debugme tmln_out " B1: $batchremoved "
Use sockets to determine cipher order
This PR modifies `cipher_pref_check()` to use `tls_sockets()`. As with similar PRs for `run_allciphers()`, `run_cipher_per_proto()`, and `run_rc4()`, it also makes use of `$OPENSSL s_client`, since `$OPENSSL s_client` is faster than `tls_sockets()`.
With this PR, `cipher_pref_check()` first uses `$OPENSSL s_client` to obtain an ordered list of ciphers. It then makes one call to `tls_sockets()` (or a few calls if proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`) to find if the server supports any ciphers that are not detected by `$OPENSSL s_client`. If not, then it is done. If it finds one, then it throws out the previous results and starts over with `tls_sockets()`. [If proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`, then it doesn't throw out the `$OPENSSL s_client` results. Instead, it continues with `tls_sockets()` to get the full list of supported ciphers, and then uses `tls_sockets()` to order that list.]
The result is that this PR works almost as fast as the current `cipher_pref_check()` if `$OPENSSL s_client` finds all of the supported ciphers, at the cost of a performance penalty when testing servers that support ciphers that would have otherwise been missed using just OpenSSL.
Note that in this PR I removed SSLv2 from the list of protocols tested. This is because https://community.qualys.com/thread/16255 states that "in SSLv2 the client selects the suite to use." It seems that in SSLv2, the client sends a list of ciphers that it supports, the server responds with a list of ciphers that the client and server have in common, and then "the client selects the suite to use." So, showing a cipher order for SSLv2 is a bit misleading.
As noted in #543, this PR does not modify the second part of `cipher_pref_check()`, which deals with NPN protocols.
2016-12-01 22:51:38 +01:00
" $FAST " && break
2016-06-07 13:02:58 +02:00
else
2017-02-25 16:31:30 +01:00
debugme tmln_out " B2: $batchremoved "
2016-06-07 13:02:58 +02:00
break
# nothing left with batchremoved ciphers, we need to put everything together
fi
done
if " $batchremoved_success " ; then
# now we combine the two cipher sets from both while loops
Use sockets to determine cipher order
This PR modifies `cipher_pref_check()` to use `tls_sockets()`. As with similar PRs for `run_allciphers()`, `run_cipher_per_proto()`, and `run_rc4()`, it also makes use of `$OPENSSL s_client`, since `$OPENSSL s_client` is faster than `tls_sockets()`.
With this PR, `cipher_pref_check()` first uses `$OPENSSL s_client` to obtain an ordered list of ciphers. It then makes one call to `tls_sockets()` (or a few calls if proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`) to find if the server supports any ciphers that are not detected by `$OPENSSL s_client`. If not, then it is done. If it finds one, then it throws out the previous results and starts over with `tls_sockets()`. [If proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`, then it doesn't throw out the `$OPENSSL s_client` results. Instead, it continues with `tls_sockets()` to get the full list of supported ciphers, and then uses `tls_sockets()` to order that list.]
The result is that this PR works almost as fast as the current `cipher_pref_check()` if `$OPENSSL s_client` finds all of the supported ciphers, at the cost of a performance penalty when testing servers that support ciphers that would have otherwise been missed using just OpenSSL.
Note that in this PR I removed SSLv2 from the list of protocols tested. This is because https://community.qualys.com/thread/16255 states that "in SSLv2 the client selects the suite to use." It seems that in SSLv2, the client sends a list of ciphers that it supports, the server responds with a list of ciphers that the client and server have in common, and then "the client selects the suite to use." So, showing a cipher order for SSLv2 is a bit misleading.
As noted in #543, this PR does not modify the second part of `cipher_pref_check()`, which deals with NPN protocols.
2016-12-01 22:51:38 +01:00
[ [ " ${ order : 0 : 1 } " = = " " ] ] && order = " ${ order : 1 } "
2016-06-07 13:02:58 +02:00
combined_ciphers = " ${ order // / : } "
Use sockets to determine cipher order
This PR modifies `cipher_pref_check()` to use `tls_sockets()`. As with similar PRs for `run_allciphers()`, `run_cipher_per_proto()`, and `run_rc4()`, it also makes use of `$OPENSSL s_client`, since `$OPENSSL s_client` is faster than `tls_sockets()`.
With this PR, `cipher_pref_check()` first uses `$OPENSSL s_client` to obtain an ordered list of ciphers. It then makes one call to `tls_sockets()` (or a few calls if proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`) to find if the server supports any ciphers that are not detected by `$OPENSSL s_client`. If not, then it is done. If it finds one, then it throws out the previous results and starts over with `tls_sockets()`. [If proto is TLSv1.2 and `$SERVER_SIZE_LIMIT_BUG` is `true`, then it doesn't throw out the `$OPENSSL s_client` results. Instead, it continues with `tls_sockets()` to get the full list of supported ciphers, and then uses `tls_sockets()` to order that list.]
The result is that this PR works almost as fast as the current `cipher_pref_check()` if `$OPENSSL s_client` finds all of the supported ciphers, at the cost of a performance penalty when testing servers that support ciphers that would have otherwise been missed using just OpenSSL.
Note that in this PR I removed SSLv2 from the list of protocols tested. This is because https://community.qualys.com/thread/16255 states that "in SSLv2 the client selects the suite to use." It seems that in SSLv2, the client sends a list of ciphers that it supports, the server responds with a list of ciphers that the client and server have in common, and then "the client selects the suite to use." So, showing a cipher order for SSLv2 is a bit misleading.
As noted in #543, this PR does not modify the second part of `cipher_pref_check()`, which deals with NPN protocols.
2016-12-01 22:51:38 +01:00
order = "" ; tested_cipher = ""
2016-06-07 13:02:58 +02:00
while true; do
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS -tls1_2 $BUGS -cipher " $combined_ciphers $tested_cipher " -connect $NODEIP : $PORT $PROXY $SNI " ) </dev/null 2>>$ERRFILE >$TMPFILE
2016-06-07 13:02:58 +02:00
if sclient_connect_successful $? $TMPFILE ; then
2017-04-13 16:32:19 +02:00
cipher = $( get_cipher $TMPFILE )
2016-06-07 13:02:58 +02:00
order += " $cipher "
tested_cipher = " $tested_cipher :- $cipher "
2016-12-01 22:51:38 +01:00
nr_ciphers_found_r2+=1
"$FAST" && break
2016-06-07 13:02:58 +02:00
else
# nothing left, we're done
break
fi
done
2016-12-01 22:51:38 +01:00
if " $FAST " && [ [ $nr_ciphers_found_r2 -ne 1 ] ] ; then
2017-02-25 16:31:30 +01:00
prln_fixme " something weird happened around line $(( LINENO - 14 )) "
2016-12-01 22:51:38 +01:00
return 1
elif ! " $FAST " && [ [ $nr_ciphers_found_r2 -ne $nr_ciphers_found_r1 ] ] ; then
2017-02-25 16:31:30 +01:00
prln_fixme " something weird happened around line $(( LINENO - 16 )) "
2016-12-01 22:51:38 +01:00
return 1
fi
2016-06-07 13:02:58 +02:00
fi
2017-02-25 16:31:30 +01:00
tm_out " $order "
2016-06-07 23:06:58 +02:00
tmpfile_handle $FUNCNAME.txt
2016-06-07 13:02:58 +02:00
return 0
}
2015-05-17 22:43:53 +02:00
cipher_pref_check() {
2017-09-19 18:37:03 +02:00
local p proto proto_hex npn_protos
2017-02-13 22:07:25 +01:00
local tested_cipher cipher order rfc_ciph rfc_order
2016-06-09 11:04:40 +02:00
local overflow_probe_cipherlist="ALL:-ECDHE-RSA-AES256-GCM-SHA384:-AES128-SHA:-DES-CBC3-SHA"
2016-12-01 22:51:38 +01:00
local -i i nr_ciphers nr_nonossl_ciphers num_bundles mod_check bundle_size bundle end_of_bundle success
local hexc ciphers_to_test
local -a rfc_ciph hexcode ciphers_found ciphers_found2
local -a -i index
local using_sockets=true ciphers_found_with_sockets
"$SSL_NATIVE" && using_sockets=false
"$FAST" && using_sockets=false
[[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false
2015-09-17 15:30:15 +02:00
pr_bold " Cipher order"
2017-10-02 14:55:57 +02:00
while read p proto_hex proto; do
2016-12-01 22:51:38 +01:00
order = "" ; ciphers_found_with_sockets = false
if [ [ $p = = ssl3 ] ] && ! " $HAS_SSL3 " && ! " $using_sockets " ; then
2017-02-25 16:31:30 +01:00
out "\n SSLv3: " ; pr_local_problem " $OPENSSL doesn't support \"s_client -ssl3\" " ;
2016-03-05 21:07:49 +01:00
continue
fi
2017-10-02 13:48:55 +02:00
[[ $(has_server_protocol "$p") -eq 1 ]] && continue
2016-12-01 22:51:38 +01:00
2017-10-18 22:37:35 +02:00
if ( [[ $p != tls1_3 ]] || "$HAS_TLS13" ) && ( [[ $p != ssl3 ]] || "$HAS_SSL3" ); then
2016-12-01 22:51:38 +01:00
# with the supplied binaries SNI works also for SSLv3
if [[ $p == tls1_2 ]] && ! "$SERVER_SIZE_LIMIT_BUG"; then
2016-06-09 11:04:40 +02:00
# for some servers the ClientHello is limited to 128 ciphers or the ClientHello itself has a length restriction.
# So far, this was only observed in TLS 1.2, affected are e.g. old Cisco LBs or ASAs, see issue #189
2016-10-28 15:30:07 +02:00
# To check whether a workaround is needed we send a laaarge list of ciphers/big client hello. If connect fails,
2016-06-09 11:04:40 +02:00
# we hit the bug and automagically do the workaround. Cost: only one additional connect for all servers
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $(s_client_options "$STARTTLS -tls1_2 $BUGS -cipher "$overflow_probe_cipherlist" -connect $NODEIP:$PORT $PROXY $SNI") </dev/null 2>>$ERRFILE >$TMPFILE
2016-06-09 11:04:40 +02:00
if ! sclient_connect_successful $? $TMPFILE; then
2016-06-09 15:56:53 +02:00
#FIXME this needs to be handled differently. We need 2 status: BUG={true,false,not tested yet}
2016-06-09 11:04:40 +02:00
SERVER_SIZE_LIMIT_BUG=true
fi
fi
if [ [ $p = = tls1_2 ] ] && " $SERVER_SIZE_LIMIT_BUG " ; then
2016-12-01 22:51:38 +01:00
order = " $( check_tls12_pref) "
2016-06-07 13:02:58 +02:00
else
2016-12-01 22:51:38 +01:00
tested_cipher=""
while true; do
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $(s_client_options "$STARTTLS -"$p" $BUGS -cipher "ALL:COMPLEMENTOFALL$tested_cipher" -connect $NODEIP:$PORT $PROXY $SNI") </dev/null 2>>$ERRFILE >$TMPFILE
2016-12-01 22:51:38 +01:00
sclient_connect_successful $? $TMPFILE || break
2017-04-13 16:32:19 +02:00
cipher=$(get_cipher $TMPFILE)
2016-12-01 22:51:38 +01:00
[ [ -z " $cipher " ] ] && break
2017-02-13 22:07:25 +01:00
order += " $cipher "
2016-12-01 22:51:38 +01:00
tested_cipher += ":-" $cipher
" $FAST " && break
done
fi
fi
nr_nonossl_ciphers=0
if "$using_sockets"; then
for (( i=0; i < TLS_NR_CIPHERS; i++ )); do
ciphers_found[i]=false
hexc="${TLS_CIPHER_HEXCODE[i]}"
if [[ ${#hexc} -eq 9 ]]; then
if [[ "$order" =~ " ${TLS_CIPHER_OSSL_NAME[i]} " ]]; then
ciphers_found[i]=true
else
ciphers_found2[nr_nonossl_ciphers]=false
hexcode[nr_nonossl_ciphers]="${hexc:2:2},${hexc:7:2}"
rfc_ciph[nr_nonossl_ciphers]="${TLS_CIPHER_RFC_NAME[i]}"
index[nr_nonossl_ciphers]=$i
# Only test ciphers that are relevant to the protocol.
if [[ "$p" == "tls1_3" ]]; then
[[ "${hexc:2:2}" == "13" ]] && nr_nonossl_ciphers+=1
elif [[ "$p" == "tls1_2" ]]; then
[[ "${hexc:2:2}" != "13" ]] && nr_nonossl_ciphers+=1
2017-02-24 16:22:59 +01:00
elif [[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA256 ]] && \
[[ ! "${TLS_CIPHER_RFC_NAME[i]}" =~ SHA384 ]] && \
2016-12-01 22:51:38 +01:00
[ [ " ${ TLS_CIPHER_RFC_NAME [i] } " != *"_CCM" ] ] && \
[ [ " ${ TLS_CIPHER_RFC_NAME [i] } " != *"_CCM_8" ] ] ; then
nr_nonossl_ciphers += 1
fi
fi
fi
done
fi
if [[ $nr_nonossl_ciphers -eq 0 ]]; then
num_bundles=0
elif [[ $p != tls1_2 ]] || ! "$SERVER_SIZE_LIMIT_BUG"; then
num_bundles=1
bundle_size=$nr_nonossl_ciphers
else
num_bundles=$nr_nonossl_ciphers/128
mod_check=$nr_nonossl_ciphers%128
[[ $mod_check -ne 0 ]] && num_bundles=$num_bundles+1
bundle_size=$nr_nonossl_ciphers/$num_bundles
mod_check=$nr_nonossl_ciphers%$num_bundles
[[ $mod_check -ne 0 ]] && bundle_size+=1
fi
for (( bundle=0; bundle < num_bundles; bundle++ )); do
end_of_bundle=$bundle*$bundle_size+$bundle_size
[[ $end_of_bundle -gt $nr_nonossl_ciphers ]] && end_of_bundle=$nr_nonossl_ciphers
while true; do
ciphers_to_test=""
for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode[i]}"
done
[[ -z "$ciphers_to_test" ]] && break
tls_sockets "$proto_hex" "${ciphers_to_test:2}, 00,ff" "ephemeralkey"
[[ $? -ne 0 ]] && break
2017-04-13 16:32:19 +02:00
cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
2016-12-01 22:51:38 +01:00
for (( i=bundle*bundle_size; i < end_of_bundle; i++ )); do
[[ "$cipher" == "${rfc_ciph[i]}" ]] && ciphers_found2[i]=true && break
done
i=${index[i]}
ciphers_found[i]=true
ciphers_found_with_sockets=true
if [[ $p != tls1_2 ]] || ! "$SERVER_SIZE_LIMIT_BUG"; then
# Throw out the results found so far and start over using just sockets
bundle=$num_bundles
for (( i=0; i < TLS_NR_CIPHERS; i++ )); do
ciphers_found[i]=true
2016-11-15 12:59:07 +01:00
done
2016-12-01 22:51:38 +01:00
break
2016-11-15 12:59:07 +01:00
fi
2016-12-01 22:51:38 +01:00
done
done
# If additional ciphers were found using sockets and there is no
# SERVER_SIZE_LIMIT_BUG, then just use sockets to find the cipher order.
# If there is a SERVER_SIZE_LIMIT_BUG, then use sockets to find the cipher
# order, but starting with the list of ciphers supported by the server.
if " $ciphers_found_with_sockets " ; then
order = ""
nr_ciphers = 0
for ( ( i = 0; i < TLS_NR_CIPHERS; i++ ) ) ; do
hexc = " ${ TLS_CIPHER_HEXCODE [i] } "
if " ${ ciphers_found [i] } " && [ [ ${# hexc } -eq 9 ] ] ; then
ciphers_found2[ nr_ciphers] = false
hexcode[ nr_ciphers] = " ${ hexc : 2 : 2 } , ${ hexc : 7 : 2 } "
rfc_ciph[ nr_ciphers] = " ${ TLS_CIPHER_RFC_NAME [i] } "
if [ [ " $p " = = "tls1_3" ] ] ; then
[ [ " ${ hexc : 2 : 2 } " = = "13" ] ] && nr_ciphers += 1
elif [ [ " $p " = = "tls1_2" ] ] ; then
[ [ " ${ hexc : 2 : 2 } " != "13" ] ] && nr_ciphers += 1
2017-02-24 16:22:59 +01:00
elif [ [ ! " ${ TLS_CIPHER_RFC_NAME [i] } " = ~ SHA256 ] ] && \
[ [ ! " ${ TLS_CIPHER_RFC_NAME [i] } " = ~ SHA384 ] ] && \
2016-12-01 22:51:38 +01:00
[ [ " ${ TLS_CIPHER_RFC_NAME [i] } " != *"_CCM" ] ] && \
[ [ " ${ TLS_CIPHER_RFC_NAME [i] } " != *"_CCM_8" ] ] ; then
nr_ciphers += 1
fi
fi
done
while true; do
ciphers_to_test=""
for (( i=0; i < nr_ciphers; i++ )); do
! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode[i]}"
done
[[ -z "$ciphers_to_test" ]] && break
tls_sockets "$proto_hex" "${ciphers_to_test:2}, 00,ff" "ephemeralkey"
[[ $? -ne 0 ]] && break
2017-04-13 16:32:19 +02:00
cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
2016-12-01 22:51:38 +01:00
for (( i=0; i < nr_ciphers; i++ )); do
[[ "$cipher" == "${rfc_ciph[i]}" ]] && ciphers_found2[i]=true && break
done
2017-02-27 16:34:04 +01:00
if [ [ " $DISPLAY_CIPHERNAMES " = ~ openssl ] ] && [ [ $TLS_NR_CIPHERS -ne 0 ] ] ; then
2017-02-13 22:07:25 +01:00
cipher = " $( rfc2openssl " $cipher " ) "
# If there is no OpenSSL name for the cipher, then use the RFC name
2017-04-13 16:32:19 +02:00
[ [ -z " $cipher " ] ] && cipher = $( get_cipher " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " )
2017-02-13 22:07:25 +01:00
fi
order += " $cipher "
2016-12-01 22:51:38 +01:00
done
2017-02-27 16:34:04 +01:00
elif [ [ -n " $order " ] ] && [ [ " $DISPLAY_CIPHERNAMES " = ~ rfc ] ] ; then
2017-02-13 22:07:25 +01:00
rfc_order = ""
while read -d " " cipher; do
rfc_ciph = " $( openssl2rfc " $cipher " ) "
if [ [ -n " $rfc_ciph " ] ] ; then
rfc_order += " $rfc_ciph "
else
rfc_order += " $cipher "
fi
done <<< " $order "
order = " $rfc_order "
2016-12-01 22:51:38 +01:00
fi
if [ [ -n " $order " ] ] ; then
2017-10-02 14:55:57 +02:00
add_tls_offered "$p" yes
2016-12-01 22:51:38 +01:00
outln
2017-02-14 14:50:56 +01:00
out " $( printf " %-10s " " $proto : " ) "
2017-03-28 19:54:54 +02:00
out " $( out_row_aligned_max_width " $order " " " $TERM_WIDTH ) "
2016-12-01 22:51:38 +01:00
fileout " order_ $p " "INFO" " Default cipher order for protocol $p : $order "
2015-09-17 15:30:15 +02:00
fi
2017-10-18 22:37:35 +02:00
done <<< " $( tm_out " ssl3 00 SSLv3\n tls1 01 TLSv1\n tls1_1 02 TLSv1.1\n tls1_2 03 TLSv1.2\n tls1_3 04 TLSv1.3\n" ) "
2015-09-17 15:30:15 +02:00
outln
2015-11-03 10:30:59 +01:00
outln
2015-09-17 15:30:15 +02:00
tmpfile_handle $FUNCNAME.txt
return 0
2015-05-17 22:43:53 +02:00
}
run_drown() when server has non-RSA certificates
This PR addresses a problem in `run_drown()` when the server does not support SSLv2 but uses multiple certificates or doesn't have an RSA certificate.
One example of the problem can be seen with www.facebook.com. If `run_server_preferences()` is run before `run_drown()`, then the results of `run_drown()` are:
```
DROWN (2016-0800, CVE-2016-0703) not vulnerable on this port (OK)
make sure you don't use this certificate elsewhere with SSLv2 enabled services
https://censys.io/ipv4?q=A626B154CC65634181250B810B1BD4C89EC277CEA08D785EEBE7E768BDA7BB00 SHA256 A3F474FB17509AE6C5B6BA5E46B79E0DE6AF1BF1EEAA040A6114676E714C9965 could help you to find out
```
If only `run_drown()` is performed, then the result is:
```
DROWN (2016-0800, CVE-2016-0703) not vulnerable on this port (OK)
make sure you don't use this certificate elsewhere with SSLv2 enabled services
https://censys.io/ipv4?q=A626B154CC65634181250B810B1BD4C89EC277CEA08D785EEBE7E768BDA7BB00 could help you to find out
```
However, A626B154CC65634181250B810B1BD4C89EC277CEA08D785EEBE7E768BDA7BB00 is the fingerprint of Facebook's ECDSA certificate, not its RSA certificate.
In addition, as noted in the "FIXME," `run_drown()` will display the warning "make sure you don't use this certificate elsewhere with SSLv2 enabled services" even if the server doesn't have an RSA certificate, even though SSLv2 can only use RSA certificates.
This PR fixes this issue by only showing the warning if the server has an RSA certificate and by ensuring that the `$cert_fingerprint_sha2` used to construct the "https://censys.io/ipv4?q=..." URL only contains a single SHA256 fingerprint and that it is the fingerprint of the server's RSA certificate.
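As a rough illustration of the idea behind the fix (not the actual change to `run_drown()`): only an RSA certificate's SHA256 fingerprint should feed the censys.io query, and the reuse warning should be skipped entirely if no RSA certificate exists, since SSLv2 can only use RSA certificates. The directory and per-certificate PEM file naming below are assumptions made for this sketch:
```
# Sketch: find the server's RSA certificate (if any) and build the censys.io
# query only from that certificate's SHA256 fingerprint.

certdir="${1:-/tmp/hostcerts}"      # assumed: one PEM file per server certificate
rsa_fingerprint=""

for certfile in "$certdir"/*.pem; do
     [[ -r "$certfile" ]] || continue
     # SSLv2 only works with RSA certificates, so only those matter for DROWN
     if openssl x509 -noout -text -in "$certfile" 2>/dev/null | grep -q "Public Key Algorithm: rsaEncryption"; then
          rsa_fingerprint=$(openssl x509 -noout -fingerprint -sha256 -in "$certfile" 2>/dev/null |
                            sed -e 's/^.*=//' -e 's/://g')
          break
     fi
done

if [[ -n "$rsa_fingerprint" ]]; then
     echo "https://censys.io/ipv4?q=$rsa_fingerprint could help you to find out"
else
     echo "no RSA certificate -- the SSLv2 certificate reuse warning does not apply"
fi
```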
2016-12-02 16:16:04 +01:00
# arg1 is OpenSSL s_client parameter or empty
2015-05-18 21:51:45 +02:00
get_host_cert() {
2015-10-11 23:07:16 +02:00
local tmpvar=$TEMPDIR/$FUNCNAME.txt # change later to $TMPFILE
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI $1") 2>/dev/null </dev/null >$tmpvar
2015-10-11 23:07:16 +02:00
if sclient_connect_successful $? $tmpvar; then
awk '/-----BEGIN/,/-----END/ { print $0 }' $tmpvar >$HOSTCERT
2016-02-20 11:07:47 +01:00
return 0
2015-10-11 23:07:16 +02:00
else
2017-08-28 18:25:45 +02:00
if [ [ -z " $1 " ] ] ; then
prln_warning "could not retrieve host certificate!"
2017-08-29 15:03:47 +02:00
fileout "host_certificate_Problem" "WARN" "Could not retrieve host certificate!"
2017-08-28 18:25:45 +02:00
fi
2015-10-11 23:07:16 +02:00
return 1
fi
2016-02-20 11:07:47 +01:00
#tmpfile_handle $FUNCNAME.txt
#return $((${PIPESTATUS[0]} + ${PIPESTATUS[1]}))
2015-05-18 21:51:45 +02:00
}
2015-09-22 15:05:59 +02:00
verify_retcode_helper() {
local ret=0
2016-01-28 23:06:34 +01:00
local -i retcode=$1
2015-09-22 15:05:59 +02:00
2017-09-25 19:51:10 +02:00
case $retcode in
# codes from ./doc/apps/verify.pod | verify(1ssl)
26) tm_out "(unsupported certificate purpose)" ;; # X509_V_ERR_INVALID_PURPOSE
24) tm_out "(certificate unreadable)" ;; # X509_V_ERR_INVALID_CA
23) tm_out "(certificate revoked)" ;; # X509_V_ERR_CERT_REVOKED
21) tm_out "(chain incomplete, only 1 cert provided)" ;; # X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE
20) tm_out "(chain incomplete)" ;; # X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY
19) tm_out "(self signed CA in chain)" ;; # X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN
18) tm_out "(self signed)" ;; # X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT
10) tm_out "(expired)" ;; # X509_V_ERR_CERT_HAS_EXPIRED
9) tm_out "(not yet valid)" ;; # X509_V_ERR_CERT_NOT_YET_VALID
2) tm_out "(issuer cert missing)" ;; # X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT
*) ret=1; tm_out " (unknown, pls report) $1" ;;
esac
2015-09-22 15:05:59 +02:00
return $ret
}
2016-03-12 17:08:43 +01:00
# arg1: number of the certificate, if more than one is provided
2015-09-22 15:05:59 +02:00
determine_trust() {
2017-09-25 19:51:10 +02:00
local json_prefix=$1
local -i i=1
local -i num_ca_bundles=0
local bundle_fname=""
local -a certificate_file verify_retcode trust
local ok_was=""
local notok_was=""
local all_ok=true
local some_ok=false
2015-09-22 15:05:59 +02:00
local code
2016-10-02 18:15:13 +02:00
local ca_bundles=""
2016-02-09 19:35:46 +01:00
local spaces=" "
2016-01-28 23:06:34 +01:00
local -i certificates_provided=1+$(grep -c "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TEMPDIR/intermediatecerts.pem)
2016-01-23 19:18:33 +01:00
local addtl_warning
2016-10-28 15:30:07 +02:00
# If $json_prefix is not empty, then there is more than one certificate
2016-02-09 19:35:46 +01:00
# and the output should be indented by two more spaces.
[[ -n $json_prefix ]] && spaces=" "
2015-09-22 15:05:59 +02:00
2017-09-23 11:34:37 +02:00
case $OSSL_VER_MAJOR.$OSSL_VER_MINOR in
1.0.2|1.1.0|1.1.1|2.3.*|2.2.*|2.1.*) # 2.x is LibreSSL. 2.1.1 was tested to work, below is not sure
:
;;
*) addtl_warning=" (Your $OPENSSL <= 1.0.2 might be too unreliable to determine trust)"
fileout "${json_prefix}chain_of_trust_Problem" "WARN" "$addtl_warning"
;;
esac
2017-02-25 16:31:30 +01:00
debugme tmln_out
2016-10-02 18:15:13 +02:00
# if you run testssl.sh from a different path, you can set either TESTSSL_INSTALL_DIR or CA_BUNDLES_PATH to find the CA BUNDLES
2017-04-12 20:34:26 +02:00
if [ [ -z " $CA_BUNDLES_PATH " ] ] ; then
2016-10-02 18:15:13 +02:00
ca_bundles = " $TESTSSL_INSTALL_DIR /etc/*.pem "
else
2016-10-08 22:50:44 +02:00
ca_bundles = " $CA_BUNDLES_PATH /*.pem "
2016-10-02 18:15:13 +02:00
fi
2017-09-25 19:51:10 +02:00
for bundle_fname in $ca_bundles ; do
certificate_file[ i] = $( basename ${ bundle_fname //.pem } )
2015-09-28 22:54:00 +02:00
if [[ ! -r $bundle_fname ]]; then
2017-02-25 16:31:30 +01:00
prln_warning "\"$bundle_fname\" cannot be found / not readable"
2015-09-22 15:05:59 +02:00
return 7
fi
2017-09-25 19:51:10 +02:00
debugme printf -- " %-12s" "${certificate_file[i]}"
# set SSL_CERT_DIR to /dev/null so that $OPENSSL verify will only use certificates in $bundle_fname
(export SSL_CERT_DIR="/dev/null"; export SSL_CERT_FILE="/dev/null"
if [[ $certificates_provided -ge 2 ]]; then
$OPENSSL verify -purpose sslserver -CAfile "$bundle_fname" -untrusted $TEMPDIR/intermediatecerts.pem $HOSTCERT >$TEMPDIR/${certificate_file[i]}.1 2>$TEMPDIR/${certificate_file[i]}.2
else
$OPENSSL verify -purpose sslserver -CAfile "$bundle_fname" $HOSTCERT >$TEMPDIR/${certificate_file[i]}.1 2>$TEMPDIR/${certificate_file[i]}.2
fi)
verify_retcode[i]=$(awk '/error [1-9][0-9]? at [0-9]+ depth lookup:/ { if (!found) {print $2; found=1} }' $TEMPDIR/${certificate_file[i]}.1)
[[ -z "${verify_retcode[i]}" ]] && verify_retcode[i]=0
if [[ ${verify_retcode[i]} -eq 0 ]]; then
trust[i]=true
some_ok=true
debugme tm_done_good "Ok "
debugme tmln_out "${verify_retcode[i]}"
else
trust[i]=false
all_ok=false
debugme tm_svrty_high "not trusted "
debugme tmln_out "${verify_retcode[i]}"
fi
i=$((i + 1))
done
num_ca_bundles=$((i - 1))

     debugme tm_out " "

     if "$all_ok"; then
          # all stores ok
          pr_done_good "Ok " ; pr_warning "$addtl_warning"
          # we already printed the warning to stdout above, so we can stay with INFO here:
          fileout "${json_prefix}chain_of_trust" "OK" "All certificate trust checks passed. $addtl_warning"
     else
          # at least one failed
          pr_svrty_critical "NOT ok"
          if ! "$some_ok"; then
               # all failed (we assume with the same issue), we're displaying the reason
               out " "
               code="$(verify_retcode_helper "${verify_retcode[1]}")"
               if [[ "$code" =~ "pls report" ]]; then
                    pr_warning "$code"
               else
                    out "$code"
               fi
               fileout "${json_prefix}chain_of_trust" "CRITICAL" "All certificate trust checks failed: $code. $addtl_warning"
          else
               # is one ok and the others not ==> display the culprit store
               if "$some_ok"; then
                    pr_svrty_critical ":"
                    for ((i=1; i<=num_ca_bundles; i++)); do
                         if ${trust[i]}; then
                              ok_was="${certificate_file[i]} $ok_was"
                         else
                              #code="$(verify_retcode_helper ${verify_retcode[i]})"
                              #notok_was="${certificate_file[i]} $notok_was"
                              pr_svrty_high " ${certificate_file[i]} "
                              code="$(verify_retcode_helper "${verify_retcode[i]}")"
                              if [[ "$code" =~ "pls report" ]]; then
                                   pr_warning "$code"
                              else
                                   out "$code"
                              fi
                              notok_was="${certificate_file[i]} $code $notok_was"
                         fi
                    done
                    #pr_svrty_high "$notok_was "
                    #outln "$code"
                    outln
                    # lf + green ones
                    [[ "$DEBUG" -eq 0 ]] && tm_out "$spaces"
                    pr_done_good "OK: $ok_was"
               fi
               fileout "${json_prefix}chain_of_trust" "CRITICAL" "Some certificate trust checks failed : OK : $ok_was NOT ok: $notok_was $addtl_warning"
          fi
          [[ -n "$addtl_warning" ]] && out "\n$spaces" && pr_warning "$addtl_warning"
     fi
     outln
     return 0
}
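
# Illustrative sketch (comments only): for each CA bundle, "$OPENSSL verify" is run and its
# first "error NN at D depth lookup:..." line is parsed. Assuming classic OpenSSL output, a
# failed check against a store looks roughly like
#      error 20 at 0 depth lookup:unable to get local issuer certificate
# so the awk above stores "20", which verify_retcode_helper() later renders as "(chain incomplete)".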

# not handled: Root CA supplied (contains anchor)

tls_time() {
     local now difftime
     local spaces=" "

     pr_bold " TLS clock skew" ; out "$spaces"

     TLS_DIFFTIME_SET=true                                   # this is a switch whether we want to measure the remote TLS_TIME
     tls_sockets "01" "$TLS_CIPHER"                          # try first TLS 1.0 (most frequently used protocol)
     [[ -z "$TLS_TIME" ]] && tls_sockets "03" "$TLS12_CIPHER"     # TLS 1.2
     [[ -z "$TLS_TIME" ]] && tls_sockets "02" "$TLS_CIPHER"       # TLS 1.1
     [[ -z "$TLS_TIME" ]] && tls_sockets "00" "$TLS_CIPHER"       # SSL 3

     if [[ -n "$TLS_TIME" ]]; then                           # at least one handshake returned a time
          difftime=$((TLS_TIME - TLS_NOW))                   # TLS_NOW has been set in tls_sockets()
          if [[ "${#difftime}" -gt 5 ]]; then
               # openssl >= 1.0.1f fills this field with random values! --> good for possible fingerprint
               out "Random values, no fingerprinting possible "
               fileout "tls_time" "INFO" "Your TLS time seems to be filled with random values to prevent fingerprinting"
          else
               [[ $difftime != "-"* ]] && [[ $difftime != "0" ]] && difftime="+$difftime"
               out "$difftime"; out " sec from localtime";
               fileout "tls_time" "INFO" "Your TLS time is skewed from your localtime by $difftime seconds"
          fi
          debugme tm_out "$TLS_TIME"
          outln
     else
          outln "SSLv3 through TLS 1.2 didn't return a timestamp"
          fileout "tls_time" "INFO" "No TLS timestamp returned by SSLv3 through TLSv1.2"
     fi
     TLS_DIFFTIME_SET=false                                  # reset the switch to save calls to date and friends in tls_sockets()
     return 0
}
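
# Worked example (comments only): with TLS_TIME=1507812005 and TLS_NOW=1507812002 the skew is
#      difftime = 1507812005 - 1507812002 = 3      --> printed as "+3 sec from localtime"
# If the difference has more than 5 digits (i.e. roughly more than a day), the remote timestamp
# is assumed to be randomized (see the openssl >= 1.0.1f note above) and no skew is reported.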

# core function determining whether handshake succeeded or not
sclient_connect_successful() {
     [[ $1 -eq 0 ]] && return 0
     [[ -n $(awk '/Master-Key: / { print $2 }' "$2") ]] && return 0
     # the second check used to be done like this (kept here for reference):
     # fgrep 'Cipher is (NONE)' "$2" &> /dev/null && return 1
     # what's left now is: master key empty and Session-ID not empty ==> probably client based auth with x509 certificate
     return 1
}
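
# Illustrative sketch (comments only): the awk above keys off the session summary that
# "openssl s_client" typically prints after a successful handshake, which contains a line like
#      Master-Key: 7B5EA2...      (hex value)
# If neither the exit code is 0 nor a Master-Key value is present, the handshake is treated
# as failed and the function returns 1.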

extract_new_tls_extensions() {
     local tls_extensions

     # this is not beautiful (grep+sed)
     # but maybe we should just get the ids and do a private matching, according to
     # https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml
     tls_extensions=$(grep -a 'TLS server extension ' "$1" | \
          sed -e 's/TLS server extension //g' -e 's/\" (id=/\/#/g' \
              -e 's/,.*$/,/g' -e 's/),$/\"/g' \
              -e 's/elliptic curves\/#10/supported_groups\/#10/g')
     tls_extensions=$(echo $tls_extensions)     # into one line

     if [[ -n "$tls_extensions" ]]; then
          # check to see if any new TLS extensions were returned and add any new ones to TLS_EXTENSIONS
          while read -d "\"" -r line; do
               if [[ $line != "" ]] && [[ ! "$TLS_EXTENSIONS" =~ "$line" ]]; then
                    #FIXME: This is a string of quoted strings, so this seems to determine the output format already. Better e.g. would be an array
                    TLS_EXTENSIONS+=" \"${line}\""
               fi
          done <<<$tls_extensions
          [[ "${TLS_EXTENSIONS:0:1}" == " " ]] && TLS_EXTENSIONS="${TLS_EXTENSIONS:1}"
     fi
}
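
# Illustrative sketch (comments only) of what the grep+sed pipeline produces: a raw
# "-tlsextdebug" line from s_client such as
#      TLS server extension "renegotiation info" (id=65281), len=1
# is rewritten to the quoted token "renegotiation info/#65281", which is then appended to
# $TLS_EXTENSIONS unless it is already listed there.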

# Note that, at the moment, this function is only called by run_server_defaults()
# and run_heartbleed(), so it does not look for the status_request or NPN extensions.
# For run_heartbleed(), only the heartbeat extension needs to be detected.
# For run_server_defaults(), status_request and NPN would already have been detected by
# get_server_certificate(), if they are supported. status_request is not requested here
# because including it in a ClientHello does not work for GOST-only servers; NPN is not
# requested because a server will not include both the NPN and ALPN extensions in the
# same ServerHello.
determine_tls_extensions() {
     local addcmd
     local -i success=1
     local line params="" tls_extensions=""
     local alpn_proto alpn="" alpn_list_len_hex alpn_extn_len_hex
     local -i alpn_list_len alpn_extn_len
     local cbc_cipher_list="ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DH-RSA-AES256-SHA256:DH-DSS-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DH-RSA-AES256-SHA:DH-DSS-AES256-SHA:ECDHE-RSA-CAMELLIA256-SHA384:ECDHE-ECDSA-CAMELLIA256-SHA384:DHE-RSA-CAMELLIA256-SHA256:DHE-DSS-CAMELLIA256-SHA256:DH-RSA-CAMELLIA256-SHA256:DH-DSS-CAMELLIA256-SHA256:DHE-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:ECDH-RSA-AES256-SHA384:ECDH-ECDSA-AES256-SHA384:ECDH-RSA-AES256-SHA:ECDH-ECDSA-AES256-SHA:ECDH-RSA-CAMELLIA256-SHA384:ECDH-ECDSA-CAMELLIA256-SHA384:AES256-SHA256:AES256-SHA:CAMELLIA256-SHA256:CAMELLIA256-SHA:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DH-RSA-AES128-SHA256:DH-DSS-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DH-RSA-AES128-SHA:DH-DSS-AES128-SHA:ECDHE-RSA-CAMELLIA128-SHA256:ECDHE-ECDSA-CAMELLIA128-SHA256:DHE-RSA-CAMELLIA128-SHA256:DHE-DSS-CAMELLIA128-SHA256:DH-RSA-CAMELLIA128-SHA256:DH-DSS-CAMELLIA128-SHA256:DHE-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DH-RSA-SEED-SHA:DH-DSS-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:ECDH-RSA-AES128-SHA256:ECDH-ECDSA-AES128-SHA256:ECDH-RSA-AES128-SHA:ECDH-ECDSA-AES128-SHA:ECDH-RSA-CAMELLIA128-SHA256:ECDH-ECDSA-CAMELLIA128-SHA256:AES128-SHA256:AES128-SHA:CAMELLIA128-SHA256:SEED-SHA:CAMELLIA128-SHA:IDEA-CBC-SHA:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA"
     local cbc_cipher_list_hex="c0,28, c0,24, c0,14, c0,0a, 00,6b, 00,6a, 00,69, 00,68, 00,39, 00,38, 00,37, 00,36, c0,77, c0,73, 00,c4, 00,c3, 00,c2, 00,c1, 00,88, 00,87, 00,86, 00,85, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, 00,3d, 00,35, 00,c0, 00,84, c0,3d, c0,3f, c0,41, c0,43, c0,45, c0,49, c0,4b, c0,4d, c0,4f, c0,27, c0,23, c0,13, c0,09, 00,67, 00,40, 00,3f, 00,3e, 00,33, 00,32, 00,31, 00,30, c0,76, c0,72, 00,be, 00,bd, 00,bc, 00,bb, 00,9a, 00,99, 00,98, 00,97, 00,45, 00,44, 00,43, 00,42, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, 00,3c, 00,2f, 00,ba, 00,96, 00,41, 00,07, c0,3c, c0,3e, c0,40, c0,42, c0,44, c0,48, c0,4a, c0,4c, c0,4e, c0,12, c0,08, 00,16, 00,13, 00,10, 00,0d, c0,0d, c0,03, 00,0a, fe,ff, ff,e0, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,62, 00,09, fe,fe, ff,e1, 00,14, 00,11, 00,08, 00,06, 00,0b, 00,0e"
     local using_sockets=true

     [[ "$OPTIMAL_PROTO" == "-ssl2" ]] && return 0
     "$SSL_NATIVE" && using_sockets=false

     if "$using_sockets"; then
          tls_extensions="00,01,00,01,02, 00,02,00,00, 00,04,00,00, 00,12,00,00, 00,16,00,00, 00,17,00,00"
          if [[ -z $STARTTLS ]]; then
               for alpn_proto in $ALPN_PROTOs; do
                    alpn+=",$(printf "%02x" ${#alpn_proto}),$(string_to_asciihex "$alpn_proto")"
               done
               alpn_list_len=${#alpn}/3
               alpn_list_len_hex=$(printf "%04x" $alpn_list_len)
               alpn_extn_len=$alpn_list_len+2
               alpn_extn_len_hex=$(printf "%04x" $alpn_extn_len)
               tls_extensions+=", 00,10,${alpn_extn_len_hex:0:2},${alpn_extn_len_hex:2:2},${alpn_list_len_hex:0:2},${alpn_list_len_hex:2:2}$alpn"
          fi
          if [[ ! "$TLS_EXTENSIONS" =~ encrypt-then-mac ]]; then
               tls_sockets "03" "$cbc_cipher_list_hex, 00,ff" "all" "$tls_extensions"
               success=$?
          fi
          if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
               tls_sockets "03" "$TLS12_CIPHER" "all" "$tls_extensions"
               success=$?
          fi
          [[ $success -eq 2 ]] && success=0
          [[ $success -eq 0 ]] && extract_new_tls_extensions "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt"
          if [[ -r "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" ]]; then
               cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE
               tmpfile_handle $FUNCNAME.txt
          fi
     else
          if "$HAS_ALPN" && [[ -z $STARTTLS ]]; then
               params="-alpn \"${ALPN_PROTOs// /,}\""       # we need to replace " " by ","
          elif "$HAS_SPDY" && [[ -z $STARTTLS ]]; then
               params="-nextprotoneg \"$NPN_PROTOs\""
          fi
          if [[ -z "$OPTIMAL_PROTO" ]] && [[ -z "$SNI" ]] && "$HAS_NO_SSL2"; then
               addcmd="-no_ssl2"
          else
               addcmd="$SNI"
          fi
          if [[ ! "$TLS_EXTENSIONS" =~ encrypt-then-mac ]]; then
               $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $addcmd $OPTIMAL_PROTO -tlsextdebug $params -cipher $cbc_cipher_list") </dev/null 2>$ERRFILE >$TMPFILE
               sclient_connect_successful $? $TMPFILE
               success=$?
          fi
          if [[ $success -ne 0 ]]; then
               $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $addcmd $OPTIMAL_PROTO -tlsextdebug $params") </dev/null 2>$ERRFILE >$TMPFILE
               sclient_connect_successful $? $TMPFILE
               success=$?
          fi
          [[ $success -eq 0 ]] && extract_new_tls_extensions $TMPFILE
          tmpfile_handle $FUNCNAME.txt
     fi
     return $success
}
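
# Worked example (comments only) of the ALPN extension bytes assembled above, assuming
# string_to_asciihex() emits comma-separated hex bytes and $ALPN_PROTOs were just "h2":
#      per-protocol entry:  ,02,68,32             (length 2, then "h2" in ASCII)
#      alpn_list_len = 9/3 = 3   --> 00,03
#      alpn_extn_len = 3+2 = 5   --> 00,05
#      appended:  00,10,00,05,00,03,02,68,32      (extension type 0x0010 = ALPN)
# The fixed entries in $tls_extensions request further extensions by their IANA numbers
# (e.g. 00,12 = signed_certificate_timestamp, 00,17 = extended_master_secret).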

# arg1 is "-cipher <OpenSSL cipher>" or empty
# arg2 is a list of protocols to try (tls1_2, tls1_1, tls1, ssl3) or empty (if all should be tried)
get_server_certificate() {
     local protocols_to_try proto
     local success
     local npn_params="" line
     local savedir
     local nrsaved

     "$HAS_SPDY" && [[ -z "$STARTTLS" ]] && npn_params="-nextprotoneg \"$NPN_PROTOs\""

     if [[ -n "$2" ]]; then
          protocols_to_try="$2"
     else
          protocols_to_try="tls1_2 tls1_1 tls1 ssl3"
     fi

     # throwing 1st every cipher/protocol at the server to know what works
     success=7

     if [[ "$OPTIMAL_PROTO" == "-ssl2" ]]; then
          $OPENSSL s_client $STARTTLS $BUGS $1 -showcerts -connect $NODEIP:$PORT $PROXY -ssl2 </dev/null 2>$ERRFILE >$TMPFILE
          sclient_connect_successful $? $TMPFILE && success=0
          if [[ $success -eq 0 ]]; then
               # Place the server's certificate in $HOSTCERT and any intermediate
               # certificates that were provided in $TEMPDIR/intermediatecerts.pem
               savedir=$(pwd); cd $TEMPDIR
               # http://backreference.org/2010/05/09/ocsp-verification-with-openssl/
               awk -v n=-1 '/Server certificate/ {start=1}
                    /-----BEGIN CERTIFICATE-----/{ if (start) { inc=1; n++ } }
                    inc { print > ("level" n ".crt") }
                    /---END CERTIFICATE-----/{ inc=0 }' $TMPFILE
               nrsaved=$(count_words "$(echo level?.crt 2>/dev/null)")
               if [[ $nrsaved -eq 0 ]]; then
                    success=1
               else
                    success=0
                    mv level0.crt $HOSTCERT
                    if [[ $nrsaved -eq 1 ]]; then
                         echo "" > $TEMPDIR/intermediatecerts.pem
                    else
                         cat level?.crt > $TEMPDIR/intermediatecerts.pem
                         rm level?.crt
                    fi
               fi
               cd "$savedir"
          fi
          tmpfile_handle $FUNCNAME.txt
          return $success
     fi

     # this all needs to be moved into determine_tls_extensions()
     >$TEMPDIR/tlsext.txt
     # first shot w/o any protocol, then in turn we collect all extensions
     $OPENSSL s_client $STARTTLS $BUGS $1 -showcerts -connect $NODEIP:$PORT $PROXY $SNI -tlsextdebug -status </dev/null 2>$ERRFILE >$TMPFILE
     sclient_connect_successful $? $TMPFILE && grep -a 'TLS server extension' $TMPFILE >$TEMPDIR/tlsext.txt

     for proto in $protocols_to_try; do
          [[ 1 -eq $(has_server_protocol $proto) ]] && continue
          addcmd=""
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS $1 -showcerts -connect $NODEIP:$PORT $PROXY $SNI -$proto -tlsextdebug $npn_params -status") </dev/null 2>$ERRFILE >$TMPFILE
          if sclient_connect_successful $? $TMPFILE; then
               success=0
               grep -a 'TLS server extension' $TMPFILE >>$TEMPDIR/tlsext.txt
               break               # now we have the certificate
          fi
     done                          # this loop is needed for IIS6 and others which have handshake size limitations

     if [[ $success -eq 7 ]]; then
          # "-status" above doesn't work for GOST only servers, so we do another test without it and see whether that works then:
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS $1 -showcerts -connect $NODEIP:$PORT $PROXY $SNI -$proto -tlsextdebug") </dev/null 2>>$ERRFILE >$TMPFILE
          if ! sclient_connect_successful $? $TMPFILE; then
               if [ -z "$1" ]; then
                    prln_warning "Strange, no SSL/TLS protocol seems to be supported (error around line $((LINENO - 6)))"
               fi
               tmpfile_handle $FUNCNAME.txt
               return 7            # this is ugly, I know
          else
               grep -a 'TLS server extension' $TMPFILE >>$TEMPDIR/tlsext.txt
               GOST_STATUS_PROBLEM=true
          fi
     fi

     case "$proto" in
          "tls1_2") DETECTED_TLS_VERSION="0303" ;;
          "tls1_1") DETECTED_TLS_VERSION="0302" ;;
          "tls1")   DETECTED_TLS_VERSION="0301" ;;
          "ssl3")   DETECTED_TLS_VERSION="0300" ;;
     esac

     extract_new_tls_extensions $TMPFILE

     # Place the server's certificate in $HOSTCERT and any intermediate
     # certificates that were provided in $TEMPDIR/intermediatecerts.pem
     savedir=$(pwd); cd $TEMPDIR
     # http://backreference.org/2010/05/09/ocsp-verification-with-openssl/
     awk -v n=-1 '/Certificate chain/ {start=1}
          /-----BEGIN CERTIFICATE-----/{ if (start) { inc=1; n++ } }
          inc { print > ("level" n ".crt") }
          /---END CERTIFICATE-----/{ inc=0 }' $TMPFILE
     nrsaved=$(count_words "$(echo level?.crt 2>/dev/null)")
     if [[ $nrsaved -eq 0 ]]; then
          success=1
     else
          success=0
          mv level0.crt $HOSTCERT
          if [[ $nrsaved -eq 1 ]]; then
               echo "" > $TEMPDIR/intermediatecerts.pem
          else
               cat level?.crt > $TEMPDIR/intermediatecerts.pem
               rm level?.crt
          fi
     fi
     cd "$savedir"
     tmpfile_handle $FUNCNAME.txt
     return $success
}
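
# Illustrative usage sketch (comments only, hypothetical argument):
#      get_server_certificate "-cipher aRSA"
# tries each protocol in turn until a handshake succeeds, leaves the leaf certificate in
# $HOSTCERT and any intermediates in $TEMPDIR/intermediatecerts.pem, and appends newly seen
# "-tlsextdebug" extensions to $TLS_EXTENSIONS; return code 0 means a certificate was saved.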

# arg1: path to certificate
# returns CN
get_cn_from_cert() {
     local subject

     # attention! openssl 1.0.2 doesn't properly handle online output from certificates from trustwave.com/github.com
     #FIXME: use -nameopt oid for robustness
     # for e.g. russian sites -esc_msb,utf8 works in an UTF8 terminal -- any way to check this platform-independently?
     # see x509(1ssl):
     subject="$($OPENSSL x509 -in $1 -noout -subject -nameopt multiline,-align,sname,-esc_msb,utf8,-space_eq 2>>$ERRFILE)"
     echo "$(awk -F'=' '/CN=/ { print $2 }' <<< "$subject")"
     return $?
}
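
# Illustrative sketch (comments only): with the -nameopt options used above, the subject is
# printed one RDN per line with short names and no spaces around '=', roughly like
#      subject=
#          C=US
#          CN=example.com
# so the awk -F'=' '/CN=/ { print $2 }' yields "example.com". (Output format assumed from the
# x509(1ssl) nameopt documentation; a CN containing '=' would need the oid handling mentioned
# in the FIXME above.)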

# Return 0 if the name provided in arg1 is a wildcard name
is_wildcard()
{
     local certname="$1"

     # If the first label in the DNS name begins "xn--", then assume it is an
     # A-label and not a wildcard name (RFC 6125, Section 6.4.3).
     [[ "${certname:0:4}" == "xn--" ]] && return 1
     # Remove part of name preceding '*' or '.'. If no "*" appears in the
     # left-most label, then it is not a wildcard name (RFC 6125, Section 6.4.3).
     basename="$(echo -n "$certname" | sed 's/^[a-zA-Z0-9\-]*//')"
     [[ "${basename:0:1}" != "*" ]] && return 1     # not a wildcard name
     # Check that there are no additional wildcard ('*') characters or any
     # other characters that do not belong in a DNS name.
     [[ -n $(echo -n "${basename:1}" | sed 's/^[\.a-zA-Z0-9\-]*//') ]] && return 1
     return 0
}
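
# Illustrative sketch (comments only) of the checks above:
#      is_wildcard "*.example.com"             # returns 0 (wildcard)
#      is_wildcard "www.example.com"           # returns 1 (no "*" in the left-most label)
#      is_wildcard "xn--e1afmkfd.example.com"  # returns 1 (A-label, per RFC 6125 6.4.3)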

# Return 0 if the name provided in arg2 is a wildcard name and it matches the name provided in arg1.
wildcard_match()
{
     local servername="$1"
     local certname="$2"
     local basename
     local -i basename_offset len_certname len_part1 len_basename
     local -i len_servername len_wildcard

     len_servername=${#servername}
     len_certname=${#certname}
     # Use rules from RFC 6125 to perform the match.
     # Assume the "*" in the wildcard needs to be replaced by one or more
     # characters, although RFC 6125 is not clear about that.
     [[ $len_servername -lt $len_certname ]] && return 1
     is_wildcard "$certname"
     [[ $? -ne 0 ]] && return 1
     # Comparisons of DNS names are case insensitive, so convert both names to uppercase.
     certname="$(toupper "$certname")"
     servername="$(toupper "$servername")"
     # Extract part of name that comes after the "*"
     basename="$(echo -n "$certname" | sed 's/^[A-Z0-9\-]*\*//')"
     len_basename=${#basename}
     len_part1=$len_certname-$len_basename-1
     len_wildcard=$len_servername-$len_certname+1
     basename_offset=$len_servername-$len_basename
     # Check that initial part of $servername matches initial part of $certname
     # and that final part of $servername matches final part of $certname.
     [[ "${servername:0:len_part1}" != "${certname:0:len_part1}" ]] && return 1
     [[ "${servername:basename_offset:len_basename}" != "$basename" ]] && return 1
     # Check that part of $servername that matches "*" is all part of a single
     # domain label.
     [[ -n $(echo -n "${servername:len_part1:len_wildcard}" | sed 's/^[A-Z0-9\-]*//') ]] && return 1
     return 0
}

# Compare the server name provided in arg1 to the CN and SAN in arg2 and return:
# 0, if server name provided does not match any of the names in the CN or SAN
# 1, if the server name provided matches a name in the SAN
# 2, if the server name provided is a wildcard match against a name in the SAN
# 4, if the server name provided matches the CN
# 5, if the server name provided matches the CN AND a name in the SAN
# 6, if the server name provided matches the CN AND is a wildcard match against a name in the SAN
# 8, if the server name provided is a wildcard match against the CN
# 9, if the server name provided matches a name in the SAN AND is a wildcard match against the CN
# 10, if the server name provided is a wildcard match against the CN AND a name in the SAN
compare_server_name_to_cert()
{
     local servername="$(toupper "$1")"
     local cert="$2"
     local cn dns_sans ip_sans san
     local -i ret=0

     # Check whether any of the DNS names in the certificate match the servername
     dns_sans="$(get_san_dns_from_cert "$cert")"
     for san in $dns_sans; do
          [[ $(toupper "$san") == "$servername" ]] && ret=1 && break
     done

     if [[ $ret -eq 0 ]]; then
          # Check whether any of the IP addresses in the certificate match the servername
          ip_sans=$($OPENSSL x509 -in "$cert" -noout -text 2>>$ERRFILE | grep -A2 "Subject Alternative Name" | \
               tr ',' '\n' | grep "IP Address:" | sed -e 's/IP Address://g' -e 's/ //g')
          for san in $ip_sans; do
               [[ "$san" == "$servername" ]] && ret=1 && break
          done
     fi

     # Check whether any of the DNS names in the certificate are wildcard names
     # that match the servername
     if [[ $ret -eq 0 ]]; then
          for san in $dns_sans; do
               wildcard_match "$servername" "$san"
               [[ $? -eq 0 ]] && ret=2 && break
          done
     fi

     cn="$(get_cn_from_cert "$cert")"
     # If the CN contains any characters that are not valid for a DNS name,
     # then assume it does not contain a DNS name.
     [[ -n $(sed 's/^[\.a-zA-Z0-9*\-]*//' <<< "$cn") ]] && return $ret
     # Check whether the CN in the certificate matches the servername
     [[ $(toupper "$cn") == "$servername" ]] && ret+=4 && return $ret
     # Check whether the CN in the certificate is a wildcard name that matches
     # the servername
     wildcard_match "$servername" "$cn"
     [[ $? -eq 0 ]] && ret+=8
     return $ret
}
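
# Worked example (comments only): for servername "www.example.com" and a certificate whose
# SAN list is "*.example.com" and whose CN is also "*.example.com":
#      exact SAN match fails, wildcard SAN match succeeds   --> ret=2
#      CN is not an exact match, but a wildcard match       --> ret=2+8=10
# which corresponds to "wildcard match against the CN AND a name in the SAN" above.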

must_staple() {
     local json_prefix="OCSP must staple: "
     local provides_stapling="$2"
     local cert extn
     local -i extn_len
     local supported=false

     # Note this function is only looking for status_request (5) and not
     # status_request_v2 (17), since OpenSSL seems to only include status_request (5)
     # in its ClientHello when the "-status" option is used.

     # OpenSSL 1.1.0 supports pretty-printing the "TLS Feature extension." For any
     # previous versions of OpenSSL, OpenSSL can only show if the extension OID is present.
     if $OPENSSL x509 -in "$HOSTCERT" -noout -text 2>>$ERRFILE | grep -A 1 "TLS Feature:" | grep -q "status_request"; then
          # FIXME: This will indicate that must staple is supported if the
          # certificate indicates status_request or status_request_v2. This is
          # probably okay, since it seems likely that any TLS Feature extension
          # that includes status_request_v2 will also include status_request.
          supported=true
     elif $OPENSSL x509 -in "$HOSTCERT" -noout -text 2>>$ERRFILE | grep -q "1.3.6.1.5.5.7.1.24:"; then
          cert="$($OPENSSL x509 -in "$HOSTCERT" -outform DER 2>>$ERRFILE | hexdump -v -e '16/1 "%02X"')"
          extn="${cert##*06082B06010505070118}"
          # Check for critical bit, and skip over it if present.
          [[ "${extn:0:6}" == "0101FF" ]] && extn="${extn:6}"
          # Next is tag and length of extnValue OCTET STRING. Assume it is less than 128 bytes.
          extn="${extn:4}"
          # The TLS Feature is a SEQUENCE of INTEGER. Get the length of the SEQUENCE
          extn_len=2*$(hex2dec "${extn:2:2}")
          # If the extension includes the status_request (5), then it supports must staple.
          if [[ "${extn:4:extn_len}" =~ 020105 ]]; then
               supported=true
          fi
     fi

     if "$supported"; then
          if "$provides_stapling"; then
               prln_done_good "supported"
               fileout "${json_prefix}ocsp_must_staple" "OK" "OCSP must staple : supported"
          else
               prln_svrty_high "requires OCSP stapling (NOT ok)"
               fileout "${json_prefix}" "HIGH" "must staple extension detected but no OCSP stapling provided"
          fi
     else
          outln "no"
          fileout "${json_prefix}ocsp_must_staple" "INFO" "OCSP must staple : no"
     fi
}
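
# Illustrative sketch (comments only) of the DER walk above: the TLS Feature extension
# OID 1.3.6.1.5.5.7.1.24 encodes as "06 08 2B 06 01 05 05 07 01 18", which is why the hex
# dump is cut at "06082B06010505070118". A typical extnValue then looks like
#      04 05 30 03 02 01 05
#      ^     ^     ^-- INTEGER 5 = status_request --> matches the "020105" test above
#      |     +-------- SEQUENCE of length 3
#      +-------------- OCTET STRING wrapper (tag+length skipped by ${extn:4})
# (layout per RFC 7633; the critical flag, if present, is the "0101FF" skipped above).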

# TODO: This function checks for Certificate Transparency support based on RFC 6962.
# It will need to be updated to add checks for Certificate Transparency support based on 6962bis.
certificate_transparency() {
     local cert="$1"
     local ocsp_response="$2"
     local -i number_of_certificates=$3
     local cipher="$4"
     local sni_used="$5"
     local tls_version="$6"
     local sni=""
     local ciphers=""
     local hexc n ciph sslver kx auth enc mac export
     local -i success

     # First check whether signed certificate timestamps (SCT) are included in the
     # server's certificate. If they aren't, check whether the server provided
     # a stapled OCSP response with SCTs. If no SCTs were found in the certificate
     # or OCSP response, check for an SCT TLS extension.
     if $OPENSSL x509 -noout -text 2>>$ERRFILE <<< "$cert" | egrep -q "CT Precertificate SCTs|1.3.6.1.4.1.11129.2.4.2"; then
          tm_out "certificate extension"
          return 0
     fi
     if egrep -q "CT Certificate SCTs|1.3.6.1.4.1.11129.2.4.5" <<< "$ocsp_response"; then
          tm_out "OCSP extension"
          return 0
     fi

     # If the server only has one certificate, then it is sufficient to check whether
     # determine_tls_extensions() discovered an SCT TLS extension. If the server has more than
     # one certificate, then it is possible that an SCT TLS extension is returned for some
     # certificates, but not for all of them.
     if [[ $number_of_certificates -eq 1 ]] && [[ "$TLS_EXTENSIONS" =~ "signed certificate timestamps" ]]; then
          tm_out "TLS extension"
          return 0
     fi

     if [[ $number_of_certificates -gt 1 ]] && ! "$SSL_NATIVE"; then
          while read hexc n ciph sslver kx auth enc mac export; do
               if [[ ${#hexc} -eq 9 ]]; then
                    ciphers+=", ${hexc:2:2},${hexc:7:2}"
               fi
          done < <($OPENSSL ciphers -V $cipher 2>>$ERRFILE)
          [[ -z "$sni_used" ]] && sni="$SNI" && SNI=""
          tls_sockets "${tls_version:2:2}" "${ciphers:2}" "all" "00,12,00,00"
          success=$?
          [[ -z "$sni_used" ]] && SNI="$sni"
          if ( [[ $success -eq 0 ]] || [[ $success -eq 2 ]] ) && \
             grep -a 'TLS server extension ' "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" | \
             grep -aq "signed certificate timestamps"; then
               tm_out "TLS extension"
               return 0
          fi
     fi

     if [[ $SERVICE != "HTTP" ]]; then
          # At the moment Certificate Transparency only applies to HTTPS.
          tm_out "N/A"
     else
          tm_out "no"
     fi
     return 0
}
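
# Illustrative sketch (comments only): the extra ClientHello sent above asks only for the
# signed_certificate_timestamp extension, whose IANA type is 18 = 0x0012 with an empty body,
# hence the literal "00,12,00,00". The offered ciphers are restricted to the ones obtained
# from "$OPENSSL ciphers -V $cipher", which is presumably how the same certificate gets
# selected by the server again.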

certificate_info() {
     local proto
     local -i certificate_number=$1
     local -i number_of_certificates=$2
     local cipher=$3
     local cert_keysize=$4
     local ocsp_response=$5
     local ocsp_response_status=$6
     local sni_used=$7
     local ct="$8"
     local cert_sig_algo cert_sig_hash_algo cert_key_algo
     local expire days2expire secs2warn ocsp_uri crl
     local startdate enddate issuer_CN issuer_C issuer_O issuer sans san all_san="" cn
     local issuer_DC issuerfinding cn_nosni=""
     local cert_fingerprint_sha1 cert_fingerprint_sha2 cert_fingerprint_serial
     local policy_oid
     local spaces=""
     local -i trust_sni=0 trust_nosni=0
     local has_dns_sans has_dns_sans_nosni
     local trust_sni_finding
     local -i certificates_provided
     local cnfinding trustfinding trustfinding_nosni
     local cnok="OK"
     local expfinding expok="OK"
     local json_prefix=""     # string to place at beginning of JSON IDs when there is more than one certificate
     local indent=""
     local days2warn2=$DAYS2WARN2
     local days2warn1=$DAYS2WARN1
     local provides_stapling=false
     local caa_node="" all_caa="" caa_property_name="" caa_property_value=""

     if [[ $number_of_certificates -gt 1 ]]; then
          [[ $certificate_number -eq 1 ]] && outln
          indent=" "
          out "$indent"
          pr_headline " Server Certificate #$certificate_number "
          [[ -z "$sni_used" ]] && pr_underline " (in response to request w/o SNI)"
          outln
          json_prefix="Server Certificate #$certificate_number "
          spaces=" "
     else
          spaces=" "
     fi

     cert_sig_algo=$($OPENSSL x509 -in $HOSTCERT -noout -text 2>>$ERRFILE | grep "Signature Algorithm" | sed 's/^.*Signature Algorithm: //' | sort -u)
     cert_key_algo=$($OPENSSL x509 -in $HOSTCERT -noout -text 2>>$ERRFILE | awk -F':' '/Public Key Algorithm:/ { print $2 }' | sort -u)

     out "$indent" ; pr_bold " Signature Algorithm "
     case $cert_sig_algo in
          sha1WithRSAEncryption)
               pr_svrty_medium "SHA1 with RSA"
               if [[ "$SERVICE" == HTTP ]]; then
                    out " -- besides: users will receive a " ; pr_svrty_high "strong browser WARNING"
               fi
               outln
               fileout "${json_prefix}algorithm" "MEDIUM" "Signature Algorithm: SHA1 with RSA"
               ;;
          sha224WithRSAEncryption)
               outln "SHA224 with RSA"
               fileout "${json_prefix}algorithm" "INFO" "Signature Algorithm: SHA224 with RSA"
               ;;
          sha256WithRSAEncryption)
               prln_done_good "SHA256 with RSA"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: SHA256 with RSA"
               ;;
          sha384WithRSAEncryption)
               prln_done_good "SHA384 with RSA"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: SHA384 with RSA"
               ;;
          sha512WithRSAEncryption)
               prln_done_good "SHA512 with RSA"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: SHA512 with RSA"
               ;;
          ecdsa-with-SHA1)
               prln_svrty_medium "ECDSA with SHA1"
               fileout "${json_prefix}algorithm" "MEDIUM" "Signature Algorithm: ECDSA with SHA1"
               ;;
          ecdsa-with-SHA224)
               outln "ECDSA with SHA224"
               fileout "${json_prefix}algorithm" "INFO" "Signature Algorithm: ECDSA with SHA224"
               ;;
          ecdsa-with-SHA256)
               prln_done_good "ECDSA with SHA256"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: ECDSA with SHA256"
               ;;
          ecdsa-with-SHA384)
               prln_done_good "ECDSA with SHA384"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: ECDSA with SHA384"
               ;;
          ecdsa-with-SHA512)
               prln_done_good "ECDSA with SHA512"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: ECDSA with SHA512"
               ;;
          dsaWithSHA1)
               prln_svrty_medium "DSA with SHA1"
               fileout "${json_prefix}algorithm" "MEDIUM" "Signature Algorithm: DSA with SHA1"
               ;;
          dsa_with_SHA224)
               outln "DSA with SHA224"
               fileout "${json_prefix}algorithm" "INFO" "Signature Algorithm: DSA with SHA224"
               ;;
          dsa_with_SHA256)
               prln_done_good "DSA with SHA256"
               fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: DSA with SHA256"
               ;;
          rsassaPss)
               cert_sig_hash_algo="$($OPENSSL x509 -in $HOSTCERT -noout -text 2>>$ERRFILE | grep -A 1 "Signature Algorithm" | head -2 | tail -1 | sed 's/^.*Hash Algorithm: //')"
               case $cert_sig_hash_algo in
                    sha1)
                         prln_svrty_medium "RSASSA-PSS with SHA1"
                         fileout "${json_prefix}algorithm" "MEDIUM" "Signature Algorithm: RSASSA-PSS with SHA1"
                         ;;
                    sha224)
                         outln "RSASSA-PSS with SHA224"
                         fileout "${json_prefix}algorithm" "INFO" "Signature Algorithm: RSASSA-PSS with SHA224"
                         ;;
                    sha256)
                         prln_done_good "RSASSA-PSS with SHA256"
                         fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: RSASSA-PSS with SHA256"
                         ;;
                    sha384)
                         prln_done_good "RSASSA-PSS with SHA384"
                         fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: RSASSA-PSS with SHA384"
                         ;;
                    sha512)
                         prln_done_good "RSASSA-PSS with SHA512"
                         fileout "${json_prefix}algorithm" "OK" "Signature Algorithm: RSASSA-PSS with SHA512"
                         ;;
                    *)
                         out "RSASSA-PSS with $cert_sig_hash_algo"
                         prln_warning " (Unknown hash algorithm)"
                         fileout "${json_prefix}algorithm" "DEBUG" "Signature Algorithm: RSASSA-PSS with $cert_sig_hash_algo"
               esac
               ;;
          md2*)
               prln_svrty_critical "MD2"
               fileout "${json_prefix}algorithm" "CRITICAL" "Signature Algorithm: MD2"
               ;;
          md4*)
               prln_svrty_critical "MD4"
               fileout "${json_prefix}algorithm" "CRITICAL" "Signature Algorithm: MD4"
               ;;
          md5*)
               prln_svrty_critical "MD5"
               fileout "${json_prefix}algorithm" "CRITICAL" "Signature Algorithm: MD5"
               ;;
          *)
               out "$cert_sig_algo ("
               pr_warning "FIXME: can't tell whether this is good or not"
               outln ")"
               fileout "${json_prefix}algorithm" "DEBUG" "Signature Algorithm: $cert_sig_algo"
               ;;
     esac
     # old, but interesting: https://blog.hboeck.de/archives/754-Playing-with-the-EFF-SSL-Observatory.html
     out "$indent"; pr_bold " Server key size "
     if [[ -z "$cert_keysize" ]]; then
          outln "(couldn't determine)"
          fileout "${json_prefix}key_size" "WARN" "Server key size cannot be determined"
     else
          case $cert_key_algo in
               *RSA*|*rsa*)             out "RSA " ;;
               *DSA*|*dsa*)             out "DSA " ;;
               *ecdsa*|*ecPublicKey)    out "ECDSA " ;;
               *GOST*|*gost*)           out "GOST " ;;
               *dh*|*DH*)               out "DH " ;;
               *)                       pr_fixme "don't know $cert_key_algo " ;;
          esac
          # https://tools.ietf.org/html/rfc4492, http://www.keylength.com/en/compare/
          # http://infoscience.epfl.ch/record/164526/files/NPDF-22.pdf
          # see http://csrc.nist.gov/publications/nistpubs/800-57/sp800-57_part1_rev3_general.pdf
          # Table 2 @ chapter 5.6.1 (~ p64)
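          # Rough security-strength equivalents per NIST SP 800-57 Part 1 Rev 3, Table 2
          # (approximate guide only): RSA/DSA/DH 1024 bit ~ 80 bit symmetric, 2048 ~ 112,
          # 3072 ~ 128; EC keys 160-223 bit ~ 80, 224-255 ~ 112, 256-383 ~ 128.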
          if [[ $cert_key_algo =~ ecdsa ]] || [[ $cert_key_algo =~ ecPublicKey ]]; then
               if [[ "$cert_keysize" -le 110 ]]; then       # a guess
                    pr_svrty_critical "$cert_keysize"
                    fileout "${json_prefix}key_size" "CRITICAL" "Server keys $cert_keysize EC bits"
               elif [[ "$cert_keysize" -le 123 ]]; then     # a guess
                    pr_svrty_high "$cert_keysize"
                    fileout "${json_prefix}key_size" "HIGH" "Server keys $cert_keysize EC bits"
               elif [[ "$cert_keysize" -le 163 ]]; then
                    pr_svrty_medium "$cert_keysize"
                    fileout "${json_prefix}key_size" "MEDIUM" "Server keys $cert_keysize EC bits"
               elif [[ "$cert_keysize" -le 224 ]]; then
                    out "$cert_keysize"
                    fileout "${json_prefix}key_size" "INFO" "Server keys $cert_keysize EC bits"
               elif [[ "$cert_keysize" -le 533 ]]; then
                    pr_done_good "$cert_keysize"
                    fileout "${json_prefix}key_size" "OK" "Server keys $cert_keysize EC bits"
               else
                    out "keysize: $cert_keysize (not expected, FIXME)"
                    fileout "${json_prefix}key_size" "DEBUG" "Server keys $cert_keysize bits (not expected)"
               fi
               outln " bits"
          elif [[ $cert_key_algo = *RSA* ]] || [[ $cert_key_algo = *rsa* ]] || [[ $cert_key_algo = *dsa* ]] || \
               [[ $cert_key_algo =~ dhKeyAgreement ]] || [[ $cert_key_algo =~ "X9.42 DH" ]]; then
               if [[ "$cert_keysize" -le 512 ]]; then
                    pr_svrty_critical "$cert_keysize"
                    outln " bits"
                    fileout "${json_prefix}key_size" "CRITICAL" "Server keys $cert_keysize bits"
               elif [[ "$cert_keysize" -le 768 ]]; then
                    pr_svrty_high "$cert_keysize"
                    outln " bits"
                    fileout "${json_prefix}key_size" "HIGH" "Server keys $cert_keysize bits"
               elif [[ "$cert_keysize" -le 1024 ]]; then
                    pr_svrty_medium "$cert_keysize"
                    outln " bits"
                    fileout "${json_prefix}key_size" "MEDIUM" "Server keys $cert_keysize bits"
               elif [[ "$cert_keysize" -le 2048 ]]; then
                    outln "$cert_keysize bits"
                    fileout "${json_prefix}key_size" "INFO" "Server keys $cert_keysize bits"
               elif [[ "$cert_keysize" -le 4096 ]]; then
                    pr_done_good "$cert_keysize"
                    fileout "${json_prefix}key_size" "OK" "Server keys $cert_keysize bits"
                    outln " bits"
               else
                    pr_warning "weird key size: $cert_keysize bits"; outln " (could cause compatibility problems)"
                    fileout "${json_prefix}key_size" "WARN" "Server keys $cert_keysize bits (Odd)"
               fi
          else
               out "$cert_keysize bits ("
               pr_warning "FIXME: can't tell whether this is good or not"
               outln ")"
               fileout "${json_prefix}key_size" "WARN" "Server keys $cert_keysize bits (unknown signature algorithm)"
          fi
     fi
     out "$indent"; pr_bold " Fingerprint / Serial "
     cert_fingerprint_sha1="$($OPENSSL x509 -noout -in $HOSTCERT -fingerprint -sha1 2>>$ERRFILE | sed 's/Fingerprint=//' | sed 's/://g')"
     cert_fingerprint_serial="$($OPENSSL x509 -noout -in $HOSTCERT -serial 2>>$ERRFILE | sed 's/serial=//')"
     cert_fingerprint_sha2="$($OPENSSL x509 -noout -in $HOSTCERT -fingerprint -sha256 2>>$ERRFILE | sed 's/Fingerprint=//' | sed 's/://g')"
     outln "$cert_fingerprint_sha1 / $cert_fingerprint_serial"
     outln "$spaces$cert_fingerprint_sha2"
     fileout "${json_prefix}fingerprint" "INFO" "Fingerprints / Serial: $cert_fingerprint_sha1 / $cert_fingerprint_serial, $cert_fingerprint_sha2"
     [[ -z $CERT_FINGERPRINT_SHA2 ]] && \
          CERT_FINGERPRINT_SHA2="$cert_fingerprint_sha2" ||
          CERT_FINGERPRINT_SHA2="$cert_fingerprint_sha2 $CERT_FINGERPRINT_SHA2"
     [[ -z $RSA_CERT_FINGERPRINT_SHA2 ]] && \
          ( [[ $cert_key_algo = *RSA* ]] || [[ $cert_key_algo = *rsa* ]] ) &&
          RSA_CERT_FINGERPRINT_SHA2="$cert_fingerprint_sha2"

     out "$indent"; pr_bold " Common Name (CN) "
     cnfinding="Common Name (CN) : "
     cn="$(get_cn_from_cert $HOSTCERT)"
     if [[ -n "$cn" ]]; then
          pr_italic "$cn"
          cnfinding="$cn"
     else
          cn="no CN field in subject"
          out "($cn)"
          cnfinding="$cn"
          cnok="INFO"
     fi
     if [[ -n "$sni_used" ]]; then
          if grep -q "\-\-\-\-\-BEGIN" "$HOSTCERT.nosni"; then
               cn_nosni="$(get_cn_from_cert "$HOSTCERT.nosni")"
               [[ -z "$cn_nosni" ]] && cn_nosni="no CN field in subject"
          fi
          debugme tm_out "\"$NODE\" | \"$cn\" | \"$cn_nosni\""
     else
          debugme tm_out "\"$NODE\" | \"$cn\""
     fi
     #FIXME: check for SSLv3/v2 and look whether it goes to a different CN (probably not polite)

     if [[ -z "$sni_used" ]] || [[ "$(toupper "$cn_nosni")" == "$(toupper "$cn")" ]]; then
          outln
     elif [[ -z "$cn_nosni" ]]; then
          out " (request w/o SNI didn't succeed"
          cnfinding+=" (request w/o SNI didn't succeed"
          if [[ $cert_sig_algo =~ ecdsa ]]; then
               out ", usual for EC certificates"
               cnfinding+=", usual for EC certificates"
          fi
          outln ")"
          cnfinding+=")"
     elif [[ "$cn_nosni" == *"no CN field"* ]]; then
          outln ", (request w/o SNI: $cn_nosni)"
          cnfinding+=", (request w/o SNI: $cn_nosni)"
     else
          out " (CN in response to request w/o SNI: "; pr_italic "$cn_nosni"; outln ")"
          cnfinding+=" (CN in response to request w/o SNI: \"$cn_nosni\")"
     fi
     fileout "${json_prefix}cn" "$cnok" "$cnfinding"
     sans=$($OPENSSL x509 -in $HOSTCERT -noout -text 2>>$ERRFILE | grep -A2 "Subject Alternative Name" | \
          egrep "DNS:|IP Address:|email:|URI:|DirName:|Registered ID:" | tr ',' '\n' | \
          sed -e 's/ *DNS://g' -e 's/ *IP Address://g' -e 's/ *email://g' -e 's/ *URI://g' -e 's/ *DirName://g' \
              -e 's/ *Registered ID://g' \
              -e 's/ *othername:<unsupported>//g' -e 's/ *X400Name:<unsupported>//g' -e 's/ *EdiPartyName:<unsupported>//g')
              #                    ^^^ CACert
     out "$indent"; pr_bold " subjectAltName (SAN) "
     if [[ -n "$sans" ]]; then
          while read san; do
               [[ -n "$san" ]] && all_san+="$san "
          done <<< "$sans"
          prln_italic "$(out_row_aligned_max_width "$all_san" "$indent" $TERM_WIDTH)"
          fileout "${json_prefix}san" "INFO" "subjectAltName (SAN) : $all_san"
     else
          if [[ $SERVICE == "HTTP" ]]; then
               pr_svrty_high "missing (NOT ok)"; outln " -- Browsers are complaining"
               fileout "${json_prefix}san" "HIGH" "subjectAltName (SAN) : -- Browsers are complaining"
          else
               pr_svrty_medium "missing"; outln " -- no SAN is deprecated"
               fileout "${json_prefix}san" "MEDIUM" "subjectAltName (SAN) : -- no SAN is deprecated"
          fi
     fi
     out "$indent"; pr_bold " Issuer "
     #FIXME: oid would be better maybe (see above)
     issuer="$($OPENSSL x509 -in $HOSTCERT -noout -issuer -nameopt multiline,-align,sname,-esc_msb,utf8,-space_eq 2>>$ERRFILE)"
     issuer_CN="$(awk -F'=' '/CN=/ { print $2 }' <<< "$issuer")"
     issuer_O="$(awk -F'=' '/O=/ { print $2 }' <<< "$issuer")"
     issuer_C="$(awk -F'=' '/ C=/ { print $2 }' <<< "$issuer")"
     issuer_DC="$(awk -F'=' '/DC=/ { print $2 }' <<< "$issuer")"

     if [[ "$issuer_O" == "issuer=" ]] || [[ "$issuer_O" == "issuer= " ]] || [[ "$issuer_CN" == "$cn" ]]; then
          prln_svrty_critical "self-signed (NOT ok)"
          fileout "${json_prefix}issuer" "CRITICAL" "Issuer: selfsigned"
     else
          issuerfinding="$issuer_CN"
          pr_italic "$issuer_CN"
          if [[ -z "$issuer_O" ]] && [[ -n "$issuer_DC" ]]; then
               for san in $issuer_DC; do
                    if [[ -z "$issuer_O" ]]; then
                         issuer_O="${san}"
                    else
                         issuer_O="${san}.${issuer_O}"
                    fi
               done
          fi
          if [[ -n "$issuer_O" ]]; then
               issuerfinding+=" ("
               out " ("
               issuerfinding+="$issuer_O"
               pr_italic "$issuer_O"
               if [[ -n "$issuer_C" ]]; then
                    issuerfinding+=" from "
                    out " from "
                    issuerfinding+="$issuer_C"
                    pr_italic "$issuer_C"
               fi
               issuerfinding+=")"
               out ")"
          fi
          outln
          fileout "${json_prefix}issuer" "INFO" "Issuer: $issuerfinding"
     fi
     out "$indent"; pr_bold " Trust (hostname) "
     compare_server_name_to_cert "$NODE" "$HOSTCERT"
     trust_sni=$?

     # Find out if the subjectAltName extension is present and contains
     # a DNS name, since Section 6.3 of RFC 6125 says:
     #      Security Warning: A client MUST NOT seek a match for a reference
     #      identifier of CN-ID if the presented identifiers include a DNS-ID,
     #      SRV-ID, URI-ID, or any application-specific identifier types
     #      supported by the client.
     $OPENSSL x509 -in $HOSTCERT -noout -text 2>>$ERRFILE | \
          grep -A2 "Subject Alternative Name" | grep -q "DNS:" && \
          has_dns_sans=true || has_dns_sans=false

     case $trust_sni in
          0) trustfinding="certificate does not match supplied URI" ;;
          1) trustfinding="Ok via SAN" ;;
          2) trustfinding="Ok via SAN wildcard" ;;
          4) if "$has_dns_sans"; then
                  trustfinding="via CN, but not SAN"
             else
                  trustfinding="via CN only"
             fi
             ;;
          5) trustfinding="Ok via SAN and CN" ;;
          6) trustfinding="Ok via SAN wildcard and CN"
             ;;
          8) if "$has_dns_sans"; then
                  trustfinding="via CN wildcard, but not SAN"
             else
                  trustfinding="via CN (wildcard) only"
             fi
             ;;
          9) trustfinding="Ok via CN wildcard and SAN"
             ;;
          10) trustfinding="Ok via SAN wildcard and CN wildcard"
             ;;
     esac
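     # Note: the return value of compare_server_name_to_cert() appears to combine as a
     # bitmask -- 1 = match via SAN, 2 = via SAN wildcard, 4 = via CN, 8 = via CN wildcard,
     # 0 = no match -- hence the combined values 5, 6, 9 and 10 handled above.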
     if [[ $trust_sni -eq 0 ]]; then
          pr_svrty_high "$trustfinding"
          trust_sni_finding="HIGH"
     elif ( [[ $trust_sni -eq 4 ]] || [[ $trust_sni -eq 8 ]] ); then
          if [[ $SERVICE == "HTTP" ]]; then
               # https://bugs.chromium.org/p/chromium/issues/detail?id=308330
               # https://bugzilla.mozilla.org/show_bug.cgi?id=1245280
               # https://www.chromestatus.com/feature/4981025180483584
               pr_svrty_high "$trustfinding"; out " -- Browsers are complaining"
               trust_sni_finding="HIGH"
          else
               pr_svrty_medium "$trustfinding"
               trust_sni_finding="MEDIUM"
               # we punish CN matching for non-HTTP as it is deprecated https://tools.ietf.org/html/rfc2818#section-3.1
               ! "$has_dns_sans" && out " -- CN only match is deprecated"
          fi
     else
          pr_done_good "$trustfinding"
          trust_sni_finding="OK"
     fi
     if [[ -n "$cn_nosni" ]]; then
          compare_server_name_to_cert "$NODE" "$HOSTCERT.nosni"
          trust_nosni=$?
          $OPENSSL x509 -in "$HOSTCERT.nosni" -noout -text 2>>$ERRFILE | \
               grep -A2 "Subject Alternative Name" | grep -q "DNS:" && \
               has_dns_sans_nosni=true || has_dns_sans_nosni=false
     fi
     # See issue #733.
     if [[ -z "$sni_used" ]]; then
          trustfinding_nosni=""
     elif ( [[ $trust_sni -eq $trust_nosni ]] && [[ "$has_dns_sans" == "$has_dns_sans_nosni" ]] ) || \
          ( [[ $trust_sni -eq 0 ]] && [[ $trust_nosni -eq 0 ]] ); then
          trustfinding_nosni=" (same w/o SNI)"
     elif [[ $trust_nosni -eq 0 ]]; then
          if [[ $trust_sni -eq 4 ]] || [[ $trust_sni -eq 8 ]]; then
               trustfinding_nosni=" (w/o SNI: certificate does not match supplied URI)"
          else
               trustfinding_nosni=" (SNI mandatory)"
          fi
     elif [[ $trust_nosni -eq 4 ]] || [[ $trust_nosni -eq 8 ]] || [[ $trust_sni -eq 4 ]] || [[ $trust_sni -eq 8 ]]; then
          case $trust_nosni in
               1) trustfinding_nosni="(w/o SNI: Ok via SAN)" ;;
               2) trustfinding_nosni="(w/o SNI: Ok via SAN wildcard)" ;;
               4) if "$has_dns_sans_nosni"; then
                       trustfinding_nosni="(w/o SNI: via CN, but not SAN)"
                  else
                       trustfinding_nosni="(w/o SNI: via CN only)"
                  fi
                  ;;
               5) trustfinding_nosni="(w/o SNI: Ok via SAN and CN)" ;;
               6) trustfinding_nosni="(w/o SNI: Ok via SAN wildcard and CN)" ;;
               8) if "$has_dns_sans_nosni"; then
                       trustfinding_nosni="(w/o SNI: via CN wildcard, but not SAN)"
                  else
                       trustfinding_nosni="(w/o SNI: via CN (wildcard) only)"
                  fi
                  ;;
               9) trustfinding_nosni="(w/o SNI: Ok via CN wildcard and SAN)" ;;
               10) trustfinding_nosni="(w/o SNI: Ok via SAN wildcard and CN wildcard)" ;;
          esac
     elif [[ $trust_sni -ne 0 ]]; then
          trustfinding_nosni=" (works w/o SNI)"
     else
          trustfinding_nosni=" (however, works w/o SNI)"
     fi
     if [[ -n "$sni_used" ]] || [[ $trust_nosni -eq 0 ]] || ( [[ $trust_nosni -ne 4 ]] && [[ $trust_nosni -ne 8 ]] ); then
          outln "$trustfinding_nosni"
     elif [[ $SERVICE == "HTTP" ]]; then
          prln_svrty_high "$trustfinding_nosni"
     else
          prln_svrty_medium "$trustfinding_nosni"
     fi
     fileout "${json_prefix}trust" "$trust_sni_finding" "${trustfinding}${trustfinding_nosni}"
     out "$indent"; pr_bold " Chain of trust"; out " "
     if [[ "$issuer_O" =~ StartCom ]] || [[ "$issuer_O" =~ WoSign ]] || [[ "$issuer_CN" =~ StartCom ]] || [[ "$issuer_CN" =~ WoSign ]]; then
          # Shortcut for this special case here.
          pr_italic "WoSign/StartCom"; out " are "; prln_svrty_critical "not trusted anymore (NOT ok)"
          fileout "${json_prefix}issuer" "CRITICAL" "Issuer: not trusted anymore (WoSign/StartCom)"
     else
          determine_trust "$json_prefix"      # Also handles fileout
     fi

# http://events.ccc.de/congress/2010/Fahrplan/attachments/1777_is-the-SSLiverse-a-safe-place.pdf, see page 40pp
     out "$indent"; pr_bold " EV cert"; out " (experimental) "
     # only the first one, seldom we have two
     policy_oid=$($OPENSSL x509 -in $HOSTCERT -text 2>>$ERRFILE | awk '/ .Policy: / { print $2 }' | awk 'NR < 2')
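     # In the "openssl x509 -text" output these appear under "X509v3 Certificate Policies:"
     # as indented lines of the form "Policy: <OID>", e.g. "Policy: 2.16.840.1.114412.2.1"
     # (one of the EV OIDs checked below); the awk above extracts only the first such OID.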
     if echo "$issuer" | egrep -q 'Extended Validation|Extended Validated|EV SSL|EV CA' || \
          [[ 2.16.840.1.114028.10.1.2 == "$policy_oid" ]] || \
          [[ 2.16.840.1.114412.1.3.0.2 == "$policy_oid" ]] || \
          [[ 2.16.840.1.114412.2.1 == "$policy_oid" ]] || \
          [[ 2.16.578.1.26.1.3.3 == "$policy_oid" ]] || \
          [[ 1.3.6.1.4.1.17326.10.14.2.1.2 == "$policy_oid" ]] || \
          [[ 1.3.6.1.4.1.17326.10.8.12.1.2 == "$policy_oid" ]] || \
          [[ 1.3.6.1.4.1.13177.10.1.3.10 == "$policy_oid" ]]; then
          out "yes "
          fileout "${json_prefix}ev" "OK" "Extended Validation (EV) (experimental) : yes"
     else
          out "no "
          fileout "${json_prefix}ev" "INFO" "Extended Validation (EV) (experimental) : no"
     fi
     debugme echo "($(newline_to_spaces "$policy_oid"))"
     outln
#TODO: use browser OIDs:
2015-09-17 15:30:15 +02:00
# https://mxr.mozilla.org/mozilla-central/source/security/certverifier/ExtendedValidation.cpp
# http://src.chromium.org/chrome/trunk/src/net/cert/ev_root_ca_metadata.cc
# https://certs.opera.com/03/ev-oids.xml
     out "$indent"; pr_bold " Certificate Expiration "

     enddate=$(parse_date "$($OPENSSL x509 -in $HOSTCERT -noout -enddate 2>>$ERRFILE | cut -d= -f 2)" +"%F %H:%M %z" "%b %d %T %Y %Z")
     startdate=$(parse_date "$($OPENSSL x509 -in $HOSTCERT -noout -startdate 2>>$ERRFILE | cut -d= -f 2)" +"%F %H:%M" "%b %d %T %Y %Z")
     days2expire=$(( $(parse_date "$enddate" "+%s" "%F %H:%M %z") - $(LC_ALL=C date "+%s") ))        # in seconds
     days2expire=$((days2expire / 3600 / 24))

     if grep -q "^Let's Encrypt Authority" <<< "$issuer_CN"; then          # we take the half of the thresholds for LE certificates
          days2warn2=$((days2warn2 / 2))
          days2warn1=$((days2warn1 / 2))
     fi

     expire=$($OPENSSL x509 -in $HOSTCERT -checkend 1 2>>$ERRFILE)
     # "openssl x509 -checkend <secs>" prints "Certificate will not expire" if the certificate
     # is still valid <secs> seconds from now -- hence the grep for the word "not" below
     if ! grep -qw not <<< "$expire"; then
          pr_svrty_critical "expired!"
          expfinding="expired!"
          expok="CRITICAL"
     else
          secs2warn=$((24 * 60 * 60 * days2warn2))          # low threshold first
          expire=$($OPENSSL x509 -in $HOSTCERT -checkend $secs2warn 2>>$ERRFILE)
          if echo "$expire" | grep -qw not; then
               secs2warn=$((24 * 60 * 60 * days2warn1))
               expire=$($OPENSSL x509 -in $HOSTCERT -checkend $secs2warn 2>>$ERRFILE)
               if echo "$expire" | grep -qw not; then
                    pr_done_good "$days2expire >= $days2warn1 days"
                    expfinding+="$days2expire >= $days2warn1 days"
               else
                    pr_svrty_medium "expires < $days2warn1 days ($days2expire)"
                    expfinding+="expires < $days2warn1 days ($days2expire)"
                    expok="MEDIUM"
               fi
          else
               pr_svrty_high "expires < $days2warn2 days ($days2expire) !"
               expfinding+="expires < $days2warn2 days ($days2expire) !"
               expok="HIGH"
          fi
     fi
     outln " ($startdate --> $enddate)"
     fileout "${json_prefix}expiration" "$expok" "Certificate Expiration : $expfinding ($startdate --> $enddate)"
     certificates_provided=1+$(grep -c "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TEMPDIR/intermediatecerts.pem)
     out "$indent"; pr_bold " # of certificates provided"; outln " $certificates_provided"
     fileout "${json_prefix}certcount" "INFO" "# of certificates provided : $certificates_provided"
     # Get both CRL and OCSP URI upfront. If there's none, this is not good. And we need to penalize this in the output
     crl="$($OPENSSL x509 -in $HOSTCERT -noout -text 2>>$ERRFILE | \
          awk '/X509v3 CRL Distribution/{i=50} i&&i--' | awk '/^$/,/^ [a-zA-Z0-9]+|^ Signature Algorithm:/' | awk -F'URI:' '/URI/ { print $2 }')"
     ocsp_uri=$($OPENSSL x509 -in $HOSTCERT -noout -ocsp_uri 2>>$ERRFILE)

     out "$indent"; pr_bold " Certificate Revocation List "
     if [[ -z "$crl" ]]; then
          if [[ -n "$ocsp_uri" ]]; then
               outln "--"
               fileout "${json_prefix}crl" "INFO" "No CRL provided"
          else
               pr_svrty_high "NOT ok --"
               outln " neither CRL nor OCSP URI provided"
               fileout "${json_prefix}crl" "HIGH" "Neither CRL nor OCSP URI provided"
          fi
     else
          if [[ $(count_lines "$crl") -eq 1 ]]; then
               outln "$crl"
               fileout "${json_prefix}crl" "INFO" "Certificate Revocation List : $crl"
          else # more than one CRL
               out_row_aligned "$crl" "$spaces"
               fileout "${json_prefix}crl" "INFO" "Certificate Revocation List : $crl"
          fi
     fi
     out "$indent"; pr_bold " OCSP URI "
     if [[ -z "$ocsp_uri" ]]; then
          outln "--"
          fileout "${json_prefix}ocsp_uri" "INFO" "OCSP URI : --"
     else
          if [[ $(count_lines "$ocsp_uri") -eq 1 ]]; then
               outln "$ocsp_uri"
          else
               out_row_aligned "$ocsp_uri" "$spaces"
          fi
          fileout "${json_prefix}ocsp_uri" "INFO" "OCSP URI : $ocsp_uri"
     fi
     out "$indent"; pr_bold " OCSP stapling "
     if grep -a "OCSP response" <<< "$ocsp_response" | grep -q "no response sent"; then
          if [[ -n "$ocsp_uri" ]]; then
               pr_svrty_low "--"
               fileout "${json_prefix}ocsp_stapling" "LOW" "OCSP stapling : not offered"
          else
               out "--"
               fileout "${json_prefix}ocsp_stapling" "INFO" "OCSP stapling : not offered"
          fi
     else
          if grep -a "OCSP Response Status" <<< "$ocsp_response_status" | grep -q successful; then
               pr_done_good "offered"
               fileout "${json_prefix}ocsp_stapling" "OK" "OCSP stapling : offered"
               provides_stapling=true
          else
               if $GOST_STATUS_PROBLEM; then
                    outln "(GOST servers make problems here, sorry)"
                    fileout "${json_prefix}ocsp_stapling" "OK" "OCSP stapling : (GOST servers make problems here, sorry)"
                    ret=0
               else
                    out "(response status unknown)"
                    fileout "${json_prefix}ocsp_stapling" "OK" "OCSP stapling : not sure what's going on here, debug: $ocsp_response"
                    debugme grep -a -A20 -B2 "OCSP response" <<< "$ocsp_response"
                    ret=2
               fi
          fi
     fi
     outln

     out "$indent"; pr_bold " OCSP must staple "
     must_staple "$json_prefix" "$provides_stapling"

     out "$indent"; pr_bold " DNS CAA RR"; out " (experimental) "

     caa_node="$NODE"
     caa=""
     while ( [[ -z "$caa" ]] && [[ ! -z "$caa_node" ]] ); do
          caa="$(get_caa_rr_record $caa_node)"
          [[ $caa_node =~ '.'$ ]] || caa_node+="."
          caa_node=${caa_node#*.}
     done
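     # A CAA RR (RFC 6844) has the form "<flags> <tag> <value>", e.g. something like
     #    example.com.  CAA  0 issue "letsencrypt.org"        (illustrative only)
     # and restricts which CAs may issue certificates for that domain. The loop above
     # walks up the DNS tree one label at a time until a CAA record is found or the
     # name is exhausted.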
     if [[ -n "$caa" ]]; then
          pr_done_good "available"; out " - please check for match with \"Issuer\" above"
          if [[ $(count_lines "$caa") -eq 1 ]]; then
               out ": "
          else
               outln; out "$spaces"
          fi
          while read caa; do
               if [[ -n "$caa" ]]; then
                    all_caa+="$caa, "
               fi
          done <<< "$caa"
          all_caa=${all_caa%, }                 # strip trailing comma
          pr_italic "$(out_row_aligned_max_width "$all_caa" "$indent" $TERM_WIDTH)"
          fileout "${json_prefix}CAA_record" "OK" "DNS Certification Authority Authorization (CAA) Resource Record / RFC6844 (check for match): \"$all_caa\""
     elif "$NODNS"; then
          pr_warning "(was instructed to not use DNS)"
          fileout "${json_prefix}CAA_record" "WARN" "DNS Certification Authority Authorization (CAA) Resource Record / RFC6844 : test skipped as instructed"
     else
          pr_svrty_low "--"
          fileout "${json_prefix}CAA_record" "LOW" "DNS Certification Authority Authorization (CAA) Resource Record / RFC6844 : not offered"
     fi
     outln

     out "$indent"; pr_bold " Certificate Transparency "
     if [[ "$ct" =~ extension ]]; then
          pr_done_good "yes"; outln " ($ct)"
          fileout "${json_prefix}certificate_transparency" "OK" "Certificate Transparency: yes ($ct)"
     else
          outln "$ct"
          fileout "${json_prefix}certificate_transparency" "INFO" "Certificate Transparency: $ct"
     fi
     outln

     return $ret
}

run_server_defaults() {
     local ciph newhostcert sni
     local match_found
     local sessticket_lifetime_hint="" lifetime unit
     local -i i n
     local -i certs_found=0
     local -a previous_hostcert previous_intermediates keysize cipher
     local -a ocsp_response ocsp_response_status sni_used tls_version ct
     local -a ciphers_to_test
     local -a -i success
     local cn_nosni cn_sni sans_nosni sans_sni san tls_extensions

     # Try each public key type once:
     # ciphers_to_test[1]: cipher suites using certificates with RSA signature public keys
     # ciphers_to_test[2]: cipher suites using certificates with RSA key encipherment public keys
     # ciphers_to_test[3]: cipher suites using certificates with DSA signature public keys
     # ciphers_to_test[4]: cipher suites using certificates with DH key agreement public keys
     # ciphers_to_test[5]: cipher suites using certificates with ECDH key agreement public keys
     # ciphers_to_test[6]: cipher suites using certificates with ECDSA signature public keys
     # ciphers_to_test[7]: cipher suites using certificates with GOST R 34.10 (either 2001 or 94) public keys
     ciphers_to_test[1]=""
     ciphers_to_test[2]=""
     for ciph in $(colon_to_spaces $($OPENSSL ciphers "aRSA" 2>>$ERRFILE)); do
          if [[ "$ciph" =~ -RSA- ]]; then
               ciphers_to_test[1]="${ciphers_to_test[1]}:$ciph"
          else
               ciphers_to_test[2]="${ciphers_to_test[2]}:$ciph"
          fi
     done
     [[ -n "${ciphers_to_test[1]}" ]] && ciphers_to_test[1]="${ciphers_to_test[1]:1}"
     [[ -n "${ciphers_to_test[2]}" ]] && ciphers_to_test[2]="${ciphers_to_test[2]:1}"
     ciphers_to_test[3]="aDSS"
     ciphers_to_test[4]="aDH"
     ciphers_to_test[5]="aECDH"
     ciphers_to_test[6]="aECDSA"
     ciphers_to_test[7]="aGOST"
     for (( n=1; n <= 14 ; n++ )); do
          # Some servers use a different certificate if the ClientHello
          # specifies TLSv1.1 and doesn't include a server name extension.
          # So, for each public key type for which a certificate was found,
          # try again, but only with TLSv1.1 and without SNI.
          if [[ $n -ge 8 ]]; then
               ciphers_to_test[n]=""
               [[ ${success[n-7]} -eq 0 ]] && ciphers_to_test[n]="${ciphers_to_test[n-7]}"
          fi

          if [[ -n "${ciphers_to_test[n]}" ]] && [[ $(count_ciphers $($OPENSSL ciphers "${ciphers_to_test[n]}" 2>>$ERRFILE)) -ge 1 ]]; then
               if [[ $n -ge 8 ]]; then
                    sni="$SNI"
                    SNI=""
                    get_server_certificate "-cipher ${ciphers_to_test[n]}" "tls1_1"
                    success[n]=$?
                    SNI="$sni"
               else
                    get_server_certificate "-cipher ${ciphers_to_test[n]}"
                    success[n]=$?
               fi
               if [[ ${success[n]} -eq 0 ]]; then
                    [[ $n -ge 8 ]] && [[ ! -e $HOSTCERT.nosni ]] && cp $HOSTCERT $HOSTCERT.nosni
                    cp "$TEMPDIR/$NODEIP.get_server_certificate.txt" $TMPFILE
                    >$ERRFILE
                    if [[ -z "$sessticket_lifetime_hint" ]]; then
                         sessticket_lifetime_hint=$(grep -aw "session ticket" $TMPFILE | grep -a lifetime)
                    fi
                    # check whether the host's certificate has been seen before
                    match_found=false
                    i=1
                    newhostcert=$(cat $HOSTCERT)
                    while [[ $i -le $certs_found ]]; do
                         if [[ "$newhostcert" == "${previous_hostcert[i]}" ]]; then
                              match_found=true
                              break
                         fi
                         i=$((i + 1))
                    done
                    if ! "$match_found" && [[ $n -ge 8 ]] && [[ $certs_found -ne 0 ]]; then
                         # A new certificate was found using TLSv1.1 without SNI.
                         # Check to see if the new certificate should be displayed.
                         # It should be displayed if it is either a match for the
                         # $NODE being tested or if it has the same subject
                         # (CN and SAN) as other certificates for this host.
                         compare_server_name_to_cert "$NODE" "$HOSTCERT"
                         [[ $? -ne 0 ]] && success[n]=0 || success[n]=1
                         if [[ ${success[n]} -ne 0 ]]; then
                              cn_nosni="$(toupper "$(get_cn_from_cert $HOSTCERT)")"
                              sans_nosni="$(toupper "$(get_san_dns_from_cert "$HOSTCERT")")"
                              echo "${previous_hostcert[1]}" > $HOSTCERT
                              cn_sni="$(toupper "$(get_cn_from_cert $HOSTCERT)")"
                              # FIXME: Not sure what the matching rule should be. At
                              # the moment, the no SNI certificate is considered a
                              # match if the CNs are the same and the SANs (if
                              # present) contain at least one DNS name in common.
                              if [[ "$cn_nosni" == "$cn_sni" ]]; then
                                   sans_sni="$(toupper "$(get_san_dns_from_cert "$HOSTCERT")")"
                                   if [[ "$sans_nosni" == "$sans_sni" ]]; then
                                        success[n]=0
                                   else
                                        for san in $sans_nosni; do
                                             [[ "$sans_sni" =~ "$san" ]] && success[n]=0 && break
                                        done
                                   fi
                              fi
                         fi
                         # If the certificate found for TLSv1.1 w/o SNI appears to
                         # be for a different host, then set match_found to true so
                         # that the new certificate will not be included in the output.
                         [[ ${success[n]} -ne 0 ]] && match_found=true
                    fi
                    if ! "$match_found"; then
                         certs_found=$((certs_found + 1))
                         cipher[certs_found]=${ciphers_to_test[n]}
                         keysize[certs_found]=$(awk '/Server public key/ { print $(NF-1) }' $TMPFILE)
                         # If an OCSP response was sent, then get the full
                         # response so that certificate_info() can determine
                         # whether it includes a certificate transparency extension.
                         if grep -a "OCSP response:" $TMPFILE | grep -q "no response sent"; then
                              ocsp_response[certs_found]="$(grep -a "OCSP response" $TMPFILE)"
                         else
                              ocsp_response[certs_found]="$(awk -v n=2 '/OCSP response:/ {start=1; inc=2} /======================================/ { if (start) {inc--} } inc' $TMPFILE)"
                         fi
                         ocsp_response_status[certs_found]=$(grep -a "OCSP Response Status" $TMPFILE)
                         previous_hostcert[certs_found]=$newhostcert
                         previous_intermediates[certs_found]=$(cat $TEMPDIR/intermediatecerts.pem)
                         [[ $n -ge 8 ]] && sni_used[certs_found]="" || sni_used[certs_found]="$SNI"
                         tls_version[certs_found]="$DETECTED_TLS_VERSION"
                    fi
               fi
          fi
     done
     determine_tls_extensions
     if [[ $? -eq 0 ]] && [[ "$OPTIMAL_PROTO" != "-ssl2" ]]; then
          cp "$TEMPDIR/$NODEIP.determine_tls_extensions.txt" $TMPFILE
          >$ERRFILE
          [[ -z "$sessticket_lifetime_hint" ]] && sessticket_lifetime_hint=$(grep -aw "session ticket" $TMPFILE | grep -a lifetime)
     fi
     # Now that all of the server's certificates have been found, determine for
     # each certificate whether certificate transparency information is provided.
     for (( i=1; i <= certs_found; i++ )); do
          ct[i]="$(certificate_transparency "${previous_hostcert[i]}" "${ocsp_response[i]}" "$certs_found" "${cipher[i]}" "${sni_used[i]}" "${tls_version[i]}")"
          # If certificate_transparency() called tls_sockets() and found a "signed certificate timestamps" extension,
          # then add it to $TLS_EXTENSIONS, since it may not have been found by determine_tls_extensions().
          [[ $certs_found -gt 1 ]] && [[ "${ct[i]}" == "TLS extension" ]] && extract_new_tls_extensions "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt"
     done

     outln
     pr_headlineln " Testing server defaults (Server Hello) "
     outln

     pr_bold " TLS extensions (standard) "
     if [[ -z "$TLS_EXTENSIONS" ]]; then
          outln "(none)"
          fileout "tls_extensions" "INFO" "TLS server extensions (std): (none)"
     else
          #FIXME: we rather want to have the chance to print each ext in italics or another format.
          # Atm it is a string of quoted strings -- that needs to be fixed at the root then
          # out_row_aligned_max_width() places line breaks at space characters.
          # So, in order to prevent the text for an extension from being broken
          # across lines, temporarily replace space characters within the text
          # of an extension with "{", and then convert the "{" back to space in
          # the output of out_row_aligned_max_width().
          tls_extensions="${TLS_EXTENSIONS// /{}"
          tls_extensions="${tls_extensions//\"{\"/\" \"}"
          tls_extensions="$(out_row_aligned_max_width "$tls_extensions" " " $TERM_WIDTH)"
          tls_extensions="${tls_extensions//{/ }"
          outln "$tls_extensions"
          fileout "tls_extensions" "INFO" "TLS server extensions (std): $TLS_EXTENSIONS"
     fi
     pr_bold " Session Ticket RFC 5077 hint "
     if [[ -z "$sessticket_lifetime_hint" ]]; then
          outln "(no lifetime advertised)"
          fileout "session_ticket" "INFO" "TLS session ticket RFC 5077 lifetime: none advertised"
          # it MAY be given a hint of the lifetime of the ticket, see https://tools.ietf.org/html/rfc5077#section-5.6 .
          # Sometimes it just does not -- but it then may also support TLS session tickets reuse
     else
          lifetime=$(grep -a lifetime <<< "$sessticket_lifetime_hint" | sed 's/[A-Za-z:() ]//g')
          unit=$(grep -a lifetime <<< "$sessticket_lifetime_hint" | sed -e 's/^.*'"$lifetime"'//' -e 's/[ ()]//g')
          out "$lifetime $unit "
          if [[ $((3600 * 24)) -lt $lifetime ]]; then
               prln_svrty_low "but: PFS requires session ticket keys to be rotated < daily !"
               fileout "session_ticket" "LOW" "TLS session ticket RFC 5077 valid for $lifetime $unit but PFS requires session ticket keys to be rotated at least daily!"
          else
               outln ", session ticket keys seem to be rotated < daily"
               fileout "session_ticket" "INFO" "TLS session ticket RFC 5077 valid for $lifetime $unit only (PFS requires session ticket keys to be rotated at least daily)"
          fi
     fi

     pr_bold " SSL Session ID support "
     if "$NO_SSL_SESSIONID"; then
          outln "no"
          fileout "session_id" "INFO" "SSL session ID support: no"
     else
          outln "yes"
          fileout "session_id" "INFO" "SSL session ID support: yes"
     fi

     pr_bold " Session Resumption "
     sub_session_resumption
     case $? in
          0) SESS_RESUMPTION[2]="ticket=yes"
             out "Tickets: yes, "
             fileout "session_resumption_ticket" "INFO" "Session resumption via TLS Session Tickets supported"
             ;;
          1) SESS_RESUMPTION[2]="ticket=no"
             out "Tickets: no, "
             fileout "session_resumption_ticket" "INFO" "Session resumption via Session Tickets is not supported"
             ;;
          2) SESS_RESUMPTION[2]="ticket=clientauth"
             pr_warning "Client Auth: Ticket resumption test not supported / "
             fileout "session_resumption_ticket" "WARN" "resumption test for TLS Session Tickets couldn't be performed because client authentication is missing"
             ;;
          7) SESS_RESUMPTION[2]="ticket=noclue"
             pr_warning "Ticket resumption test failed, pls report / "
             fileout "session_resumption_ticket" "WARN" "resumption test for TLS Session Tickets failed, pls report"
             ;;
     esac

     if "$NO_SSL_SESSIONID"; then
          SESS_RESUMPTION[1]="ID=no"
          outln "ID: no"
          fileout "session_resumption_id" "INFO" "No Session ID, no resumption"
     else
          sub_session_resumption ID
          case $? in
               0) SESS_RESUMPTION[1]="ID=yes"
                  outln "ID: yes"
                  fileout "session_resumption_id" "INFO" "Session resumption via Session ID supported"
                  ;;
               1) SESS_RESUMPTION[1]="ID=no"
                  outln "ID: no"
                  fileout "session_resumption_id" "INFO" "Session resumption via Session ID is not supported"
                  ;;
               2) SESS_RESUMPTION[1]="ID=clientauth"
                  [[ ${SESS_RESUMPTION[2]} =~ clientauth ]] || pr_warning "Client Auth: "
                  prln_warning "ID resumption test not supported"
                  fileout "session_resumption_ID" "WARN" "resumption test via Session ID couldn't be performed because client authentication is missing"
                  ;;
               7) SESS_RESUMPTION[1]="ID=noclue"
                  prln_warning "ID resumption test failed, pls report"
                  fileout "session_resumption_ID" "WARN" "resumption test via Session ID failed, pls report"
                  ;;
          esac
     fi
     tls_time

     if [[ -n "$SNI" ]] && [[ $certs_found -ne 0 ]] && [[ ! -e $HOSTCERT.nosni ]]; then
          # no cipher suites specified here. We just want the default vhost subject
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $OPTIMAL_PROTO") 2>>$ERRFILE </dev/null | awk '/-----BEGIN/,/-----END/ { print $0 }' >$HOSTCERT.nosni
     fi

     for (( i=1; i <= certs_found; i++ )); do
          echo "${previous_hostcert[i]}" > $HOSTCERT
          echo "${previous_intermediates[i]}" > $TEMPDIR/intermediatecerts.pem
          certificate_info "$i" "$certs_found" "${cipher[i]}" "${keysize[i]}" "${ocsp_response[i]}" "${ocsp_response_status[i]}" "${sni_used[i]}" "${ct[i]}"
     done
}

get_session_ticket_lifetime_from_serverhello() {
     awk '/session ticket.*lifetime/ { print $(NF-1) "$1" }'
}

get_san_dns_from_cert() {
     echo "$($OPENSSL x509 -in "$1" -noout -text 2>>$ERRFILE | \
          grep -A2 "Subject Alternative Name" | tr ',' '\n' | grep "DNS:" | \
          sed -e 's/DNS://g' -e 's/ //g' | tr '\n' ' ')"
}
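
# Illustrative example for the helper above: for a certificate whose SAN extension
# contains "DNS:www.example.com, DNS:example.com", get_san_dns_from_cert prints the
# DNS names space separated, i.e. "www.example.com example.com".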

run_pfs() {
     local -i sclient_success
     local pfs_offered=false ecdhe_offered=false ffdhe_offered=false
     local pfs_tls13_offered=false
     local protos_to_try proto hexc dash pfs_cipher sslvers auth mac export curve dhlen
     local -a hexcode normalized_hexcode ciph rfc_ciph kx enc ciphers_found sigalg ossl_supported
     # generated from 'kEECDH:kEDH:!aNULL:!eNULL:!DES:!3DES:!RC4' with openssl 1.0.2i and openssl 1.1.0
     local pfs_cipher_list="TLS13-AES-128-GCM-SHA256:TLS13-AES-256-GCM-SHA384:TLS13-CHACHA20-POLY1305-SHA256:TLS13-AES-128-CCM-SHA256:TLS13-AES-128-CCM-8-SHA256:DHE-DSS-AES128-GCM-SHA256:DHE-DSS-AES128-SHA256:DHE-DSS-AES128-SHA:DHE-DSS-AES256-GCM-SHA384:DHE-DSS-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-DSS-CAMELLIA128-SHA256:DHE-DSS-CAMELLIA128-SHA:DHE-DSS-CAMELLIA256-SHA256:DHE-DSS-CAMELLIA256-SHA:DHE-DSS-SEED-SHA:DHE-RSA-AES128-CCM8:DHE-RSA-AES128-CCM:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-CCM8:DHE-RSA-AES256-CCM:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-RSA-CAMELLIA128-SHA256:DHE-RSA-CAMELLIA128-SHA:DHE-RSA-CAMELLIA256-SHA256:DHE-RSA-CAMELLIA256-SHA:DHE-RSA-CHACHA20-POLY1305-OLD:DHE-RSA-CHACHA20-POLY1305:DHE-RSA-SEED-SHA:ECDHE-ECDSA-AES128-CCM8:ECDHE-ECDSA-AES128-CCM:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-ECDSA-AES256-CCM8:ECDHE-ECDSA-AES256-CCM:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-ECDSA-CAMELLIA128-SHA256:ECDHE-ECDSA-CAMELLIA256-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305-OLD:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-RSA-CAMELLIA128-SHA256:ECDHE-RSA-CAMELLIA256-SHA384:ECDHE-RSA-CHACHA20-POLY1305-OLD:ECDHE-RSA-CHACHA20-POLY1305"
     local pfs_hex_cipher_list="" ciphers_to_test
     local ecdhe_cipher_list="" ecdhe_cipher_list_hex="" ffdhe_cipher_list_hex=""
     local curves_hex=("00,01" "00,02" "00,03" "00,04" "00,05" "00,06" "00,07" "00,08" "00,09" "00,0a" "00,0b" "00,0c" "00,0d" "00,0e" "00,0f" "00,10" "00,11" "00,12" "00,13" "00,14" "00,15" "00,16" "00,17" "00,18" "00,19" "00,1a" "00,1b" "00,1c" "00,1d" "00,1e")
     local -a curves_ossl=("sect163k1" "sect163r1" "sect163r2" "sect193r1" "sect193r2" "sect233k1" "sect233r1" "sect239k1" "sect283k1" "sect283r1" "sect409k1" "sect409r1" "sect571k1" "sect571r1" "secp160k1" "secp160r1" "secp160r2" "secp192k1" "prime192v1" "secp224k1" "secp224r1" "secp256k1" "prime256v1" "secp384r1" "secp521r1" "brainpoolP256r1" "brainpoolP384r1" "brainpoolP512r1" "X25519" "X448")
     local -a curves_ossl_output=("K-163" "sect163r1" "B-163" "sect193r1" "sect193r2" "K-233" "B-233" "sect239k1" "K-283" "B-283" "K-409" "B-409" "K-571" "B-571" "secp160k1" "secp160r1" "secp160r2" "secp192k1" "P-192" "secp224k1" "P-224" "secp256k1" "P-256" "P-384" "P-521" "brainpoolP256r1" "brainpoolP384r1" "brainpoolP512r1" "X25519" "X448")
     local -a ffdhe_groups_hex=("01,00" "01,01" "01,02" "01,03" "01,04")
     local -a ffdhe_groups_output=("ffdhe2048" "ffdhe3072" "ffdhe4096" "ffdhe6144" "ffdhe8192")
     local -a supported_curve
     local -i nr_supported_ciphers=0 nr_curves=0 nr_ossl_curves=0 i j low high
     local pfs_ciphers curves_offered="" curves_to_test temp
     local len1 len2 curve_found
     local has_dh_bits="$HAS_DH_BITS"
     local using_sockets=true

     "$SSL_NATIVE" && using_sockets=false
     "$FAST" && using_sockets=false
     [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false
     outln
     pr_headline " Testing robust (perfect) forward secrecy"; prln_underline ", (P)FS -- omitting Null Authentication/Encryption, 3DES, RC4 "
     if ! "$using_sockets"; then
          [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && pr_warning " Cipher mapping not available, doing a fallback to openssl"
          if ! "$HAS_DH_BITS" && "$WIDE"; then
               [[ $TLS_NR_CIPHERS == 0 ]] && ! "$SSL_NATIVE" && ! "$FAST" && out "."
               pr_warning " (Your $OPENSSL cannot show DH/ECDH bits)"
          fi
          outln
     fi

     if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then
          for (( i=0; i < TLS_NR_CIPHERS; i++ )); do
               pfs_cipher="${TLS_CIPHER_RFC_NAME[i]}"
               hexc="${TLS_CIPHER_HEXCODE[i]}"
               if ( [[ "$pfs_cipher" == "TLS_DHE_"* ]] || [[ "$pfs_cipher" == "TLS_ECDHE_"* ]] || [[ "${hexc:2:2}" == "13" ]] ) && \
                  [[ ! "$pfs_cipher" =~ NULL ]] && [[ ! "$pfs_cipher" =~ DES ]] && [[ ! "$pfs_cipher" =~ RC4 ]] && \
                  [[ ! "$pfs_cipher" =~ PSK ]] && ( "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}" ); then
                    pfs_hex_cipher_list+=", ${hexc:2:2},${hexc:7:2}"
                    ciph[nr_supported_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}"
                    rfc_ciph[nr_supported_ciphers]="${TLS_CIPHER_RFC_NAME[i]}"
                    kx[nr_supported_ciphers]="${TLS_CIPHER_KX[i]}"
                    enc[nr_supported_ciphers]="${TLS_CIPHER_ENC[i]}"
                    ciphers_found[nr_supported_ciphers]=false
                    sigalg[nr_supported_ciphers]=""
                    ossl_supported[nr_supported_ciphers]="${TLS_CIPHER_OSSL_SUPPORTED[i]}"
                    hexcode[nr_supported_ciphers]="${hexc:2:2},${hexc:7:2}"
                    if [[ "${hexc:2:2}" == "00" ]]; then
                         normalized_hexcode[nr_supported_ciphers]="x${hexc:7:2}"
                    else
                         normalized_hexcode[nr_supported_ciphers]="x${hexc:2:2}${hexc:7:2}"
                    fi
                    "$using_sockets" && ! "$has_dh_bits" && "$WIDE" && ossl_supported[nr_supported_ciphers]=false
                    nr_supported_ciphers+=1
               fi
          done
     else
          while read hexc dash ciph[nr_supported_ciphers] sslvers kx[nr_supported_ciphers] auth enc[nr_supported_ciphers] mac export; do
               ciphers_found[nr_supported_ciphers]=false
               if [[ "${hexc:2:2}" == "00" ]]; then
                    normalized_hexcode[nr_supported_ciphers]="x${hexc:7:2}"
               else
                    normalized_hexcode[nr_supported_ciphers]="x${hexc:2:2}${hexc:7:2}"
               fi
               sigalg[nr_supported_ciphers]=""
               ossl_supported[nr_supported_ciphers]=true
               nr_supported_ciphers+=1
          done < <($OPENSSL ciphers -V "$pfs_cipher_list" 2>$ERRFILE)
     fi
     export=""

     if [[ $(has_server_protocol "tls1_3") -eq 0 ]]; then
          # All TLSv1.3 cipher suites offer robust PFS.
          sclient_success=0
     elif "$using_sockets"; then
          tls_sockets "04" "${pfs_hex_cipher_list:2}"
          sclient_success=$?
          [[ $sclient_success -eq 2 ]] && sclient_success=0
     else
          debugme echo $nr_supported_ciphers
          debugme echo $(actually_supported_ciphers $pfs_cipher_list)
          if [[ "$nr_supported_ciphers" -le "$CLIENT_MIN_PFS" ]]; then
               outln
               prln_local_problem " You only have $nr_supported_ciphers PFS ciphers on the client side "
               fileout "pfs" "WARN" "(Perfect) Forward Secrecy tests: Skipped. You only have $nr_supported_ciphers PFS ciphers on the client side. ($CLIENT_MIN_PFS are required)"
               return 1
          fi
          $OPENSSL s_client $(s_client_options "-cipher $pfs_cipher_list $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
          sclient_connect_successful $? $TMPFILE
          sclient_success=$?
          [[ $sclient_success -eq 0 ]] && [[ $(grep -ac "BEGIN CERTIFICATE" $TMPFILE) -eq 0 ]] && sclient_success=1
     fi
     if [[ $sclient_success -ne 0 ]]; then
          outln
          prln_svrty_medium " No ciphers supporting Forward Secrecy offered"
          fileout "pfs" "MEDIUM" "(Perfect) Forward Secrecy : No ciphers supporting Forward Secrecy offered"
     else
          outln
          pfs_offered=true
          pfs_ciphers=""
          pr_done_good " PFS is offered (OK)"
          fileout "pfs" "OK" "(Perfect) Forward Secrecy : PFS is offered"
          if "$WIDE"; then
               outln ", ciphers follow (client/browser support is important here) \n"
               neat_header
          else
               out " "
          fi
          if "$HAS_TLS13"; then
               protos_to_try="-no_ssl2 -no_tls1_3"
          else
               protos_to_try="-no_ssl2"
          fi
          for proto in $protos_to_try; do
               while true; do
                    ciphers_to_test=""
                    for (( i=0; i < nr_supported_ciphers; i++ )); do
                         ! "${ciphers_found[i]}" && "${ossl_supported[i]}" && ciphers_to_test+=":${ciph[i]}"
                    done
                    [[ -z "$ciphers_to_test" ]] && break
                    $OPENSSL s_client $(s_client_options "$proto -cipher "${ciphers_to_test:1}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") &>$TMPFILE </dev/null
                    sclient_connect_successful $? $TMPFILE || break
                    pfs_cipher=$(get_cipher $TMPFILE)
                    [[ -z "$pfs_cipher" ]] && break
                    for (( i=0; i < nr_supported_ciphers; i++ )); do
                         [[ "$pfs_cipher" == "${ciph[i]}" ]] && break
                    done
                    [[ $i -eq $nr_supported_ciphers ]] && break
                    ciphers_found[i]=true
                    if [[ "$pfs_cipher" == TLS13* ]]; then
                         pfs_tls13_offered=true
                         "$WIDE" && kx[i]="$(read_dhtype_from_file $TMPFILE)"
                    fi
                    if "$WIDE"; then
                         dhlen=$(read_dhbits_from_file "$TMPFILE" quiet)
                         kx[i]="${kx[i]} $dhlen"
                    fi
                    "$WIDE" && "$SHOW_SIGALGO" && grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TMPFILE && \
                         sigalg[i]="$(read_sigalg_from_file "$TMPFILE")"
               done
          done
if " $using_sockets " ; then
for proto in 04 03; do
while true; do
ciphers_to_test = ""
for ( ( i = 0; i < nr_supported_ciphers; i++ ) ) ; do
! " ${ ciphers_found [i] } " && ciphers_to_test += " , ${ hexcode [i] } "
done
[ [ -z " $ciphers_to_test " ] ] && break
[ [ " $proto " = = "04" ] ] && [ [ ! " ${ ciphers_to_test : 2 } " = ~ ,\ 13,[ 0-9a-f] [ 0-9a-f] ] ] && break
ciphers_to_test = " $( strip_inconsistent_ciphers " $proto " " $ciphers_to_test " ) "
[ [ -z " $ciphers_to_test " ] ] && break
if " $WIDE " && " $SHOW_SIGALGO " ; then
tls_sockets " $proto " " ${ ciphers_to_test : 2 } , 00,ff " "all"
else
tls_sockets " $proto " " ${ ciphers_to_test : 2 } , 00,ff " "ephemeralkey"
fi
sclient_success = $?
[ [ $sclient_success -ne 0 ] ] && [ [ $sclient_success -ne 2 ] ] && break
pfs_cipher = $( get_cipher " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " )
for ( ( i = 0; i < nr_supported_ciphers; i++ ) ) ; do
[ [ " $pfs_cipher " = = " ${ rfc_ciph [i] } " ] ] && break
done
[ [ $i -eq $nr_supported_ciphers ] ] && break
ciphers_found[ i] = true
if [ [ " ${ kx [i] } " = = "Kx=any" ] ] ; then
pfs_tls13_offered = true
" $WIDE " && kx[ i] = " $( read_dhtype_from_file " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " ) "
fi
if " $WIDE " ; then
dhlen = $( read_dhbits_from_file " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " quiet)
kx[ i] = " ${ kx [i] } $dhlen "
fi
" $WIDE " && " $SHOW_SIGALGO " && [ [ -r " $HOSTCERT " ] ] && \
sigalg[ i] = " $( read_sigalg_from_file " $HOSTCERT " ) "
done
2016-12-08 18:36:45 +01:00
done
fi
          for (( i=0; i < nr_supported_ciphers; i++ )); do
               ! "${ciphers_found[i]}" && ! "$SHOW_EACH_C" && continue
               if "${ciphers_found[i]}"; then
                    if ( [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ "${ciph[i]}" != "-" ]] ) || [[ "${rfc_ciph[i]}" == "-" ]]; then
                         pfs_cipher="${ciph[i]}"
                    else
                         pfs_cipher="${rfc_ciph[i]}"
                    fi
                    pfs_ciphers+="$pfs_cipher "
                    if [[ "${ciph[i]}" == "ECDHE-"* ]] || [[ "${ciph[i]}" == TLS13* ]] || ( "$using_sockets" && [[ "${rfc_ciph[i]}" == "TLS_ECDHE_"* ]] ); then
                         ecdhe_offered=true
                         ecdhe_cipher_list_hex+=", ${hexcode[i]}"
                         [[ "${ciph[i]}" != "-" ]] && ecdhe_cipher_list+=":$pfs_cipher"
                    fi
                    if [[ "${ciph[i]}" == "DHE-"* ]] || [[ "${ciph[i]}" == TLS13* ]] || ( "$using_sockets" && [[ "${rfc_ciph[i]}" == "TLS_DHE_"* ]] ); then
                         ffdhe_offered=true
                         ffdhe_cipher_list_hex+=", ${hexcode[i]}"
                    fi
               fi
               if "$WIDE"; then
                    neat_list "$(tolower "${normalized_hexcode[i]}")" "${ciph[i]}" "${kx[i]}" "${enc[i]}" "${ciphers_found[i]}"
                    if "$SHOW_EACH_C"; then
                         if "${ciphers_found[i]}"; then
                              pr_done_best "available"
                         else
                              pr_deemphasize "not a/v"
                         fi
                    fi
                    outln "${sigalg[i]}"
               fi
          done
          ! "$WIDE" && out "$(out_row_aligned_max_width "$pfs_ciphers" " " $TERM_WIDTH)"
          debugme echo $pfs_offered
          "$WIDE" || outln
          fileout "pfs_ciphers" "INFO" "(Perfect) Forward Secrecy Ciphers: $pfs_ciphers"
     fi
     # find out what elliptic curves are supported.
     if "$ecdhe_offered"; then
          for curve in "${curves_ossl[@]}"; do
               ossl_supported[nr_curves]=false
               supported_curve[nr_curves]=false
               $OPENSSL s_client -curves $curve -connect x 2>&1 | egrep -iaq "Error with command|unknown option"
               [[ $? -ne 0 ]] && ossl_supported[nr_curves]=true && nr_ossl_curves+=1
               nr_curves+=1
          done

          # OpenSSL limits the number of curves that can be specified in the
          # "-curves" option to 28. So, break the list in two if there are more
          # than 28 curves supported by OpenSSL.
          for j in 1 2; do
               if [[ $j -eq 1 ]]; then
                    if [[ $nr_ossl_curves -le 28 ]]; then
                         low=0; high=$nr_curves
                    else
                         low=0; high=$nr_curves/2
                    fi
               else
                    if [[ $nr_ossl_curves -le 28 ]]; then
                         continue # all curves tested in first round
                    else
                         low=$nr_curves/2; high=$nr_curves
                    fi
               fi
               if "$HAS_TLS13"; then
                    if "$pfs_tls13_offered"; then
                         protos_to_try="-no_ssl2 -no_tls1_3"
                    else
                         protos_to_try="-no_tls1_3"
                    fi
               else
                    protos_to_try="-no_ssl2"
               fi
               for proto in $protos_to_try; do
                    while true; do
                         curves_to_test=""
                         for (( i=low; i < high; i++ )); do
                              "${ossl_supported[i]}" && ! "${supported_curve[i]}" && curves_to_test+=":${curves_ossl[i]}"
                         done
                         [[ -z "$curves_to_test" ]] && break
                         $OPENSSL s_client $(s_client_options "$proto -cipher "${ecdhe_cipher_list:1}" -curves "${curves_to_test:1}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") &>$TMPFILE </dev/null
                         sclient_connect_successful $? $TMPFILE || break
                         temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$TMPFILE")
                         curve_found="${temp%%,*}"
                         if [[ "$curve_found" == "ECDH" ]]; then
                              curve_found="${temp#*, }"
                              curve_found="${curve_found%%,*}"
                         fi
                         for (( i=low; i < high; i++ )); do
                              ! "${supported_curve[i]}" && [[ "${curves_ossl_output[i]}" == "$curve_found" ]] && break
                         done
                         [[ $i -eq $high ]] && break
                         supported_curve[i]=true
                    done
               done
          done
     fi
if " $ecdhe_offered " && " $using_sockets " ; then
protos_to_try = "03"
" $pfs_tls13_offered " && protos_to_try = "04 03"
for proto in $protos_to_try ; do
if [ [ " $proto " = = "03" ] ] ; then
ecdhe_cipher_list_hex = " $( strip_inconsistent_ciphers "03" " $ecdhe_cipher_list_hex " ) "
[ [ -z " $ecdhe_cipher_list_hex " ] ] && continue
fi
2016-12-08 18:36:45 +01:00
while true; do
curves_to_test = ""
2017-11-02 16:28:09 +01:00
for ( ( i = 0; i < nr_curves; i++ ) ) ; do
! " ${ supported_curve [i] } " && curves_to_test += " , ${ curves_hex [i] } "
2016-12-08 18:36:45 +01:00
done
[ [ -z " $curves_to_test " ] ] && break
2017-11-02 16:28:09 +01:00
len1 = $( printf "%02x" " $(( 2 * ${# curves_to_test } / 7 )) " )
len2 = $( printf "%02x" " $(( 2 * ${# curves_to_test } / 7 + 2 )) " )
tls_sockets " $proto " " ${ ecdhe_cipher_list_hex : 2 } " "ephemeralkey" " 00, 0a, 00, $len2 , 00, $len1 , ${ curves_to_test : 2 } "
sclient_success = $?
[ [ $sclient_success -ne 0 ] ] && [ [ $sclient_success -ne 2 ] ] && break
temp = $( awk -F': ' '/^Server Temp Key/ { print $2 }' " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " )
2017-10-11 16:59:13 +02:00
curve_found = " ${ temp %%,* } "
2017-10-11 21:20:49 +02:00
if [ [ " $curve_found " = = "ECDH" ] ] ; then
curve_found = " ${ temp #*, } "
curve_found = " ${ curve_found %%,* } "
fi
2017-11-02 16:28:09 +01:00
for ( ( i = 0; i < nr_curves; i++ ) ) ; do
2016-12-08 18:36:45 +01:00
! " ${ supported_curve [i] } " && [ [ " ${ curves_ossl_output [i] } " = = " $curve_found " ] ] && break
done
2017-11-02 16:28:09 +01:00
[ [ $i -eq $nr_curves ] ] && break
2016-12-08 18:36:45 +01:00
supported_curve[ i] = true
2016-07-14 19:23:50 +02:00
done
done
2016-12-08 18:36:45 +01:00
fi
if " $ecdhe_offered " ; then
for ( ( i = 0; i < nr_curves; i++ ) ) ; do
2017-02-09 17:36:24 +01:00
" ${ supported_curve [i] } " && curves_offered += " ${ curves_ossl [i] } "
2016-07-11 17:00:56 +02:00
done
if [ [ -n " $curves_offered " ] ] ; then
2016-07-16 20:48:56 +02:00
" $WIDE " && outln
2017-02-09 17:36:24 +01:00
pr_bold " Elliptic curves offered: "
2017-03-28 19:54:54 +02:00
out_row_aligned_max_width_by_entry " $curves_offered " " " $TERM_WIDTH pr_ecdh_curve_quality
2017-02-09 17:36:24 +01:00
outln
2016-07-11 18:44:28 +02:00
fileout "ecdhe_curves" "INFO" " Elliptic curves offered $curves_offered "
2016-07-11 17:00:56 +02:00
fi
fi
     if "$using_sockets" && ( "$pfs_tls13_offered" || ( "$ffdhe_offered" && "$EXPERIMENTAL" ) ); then
          # find out what groups from RFC 7919 are supported.
          nr_curves=0
          for curve in "${ffdhe_groups_output[@]}"; do
               supported_curve[nr_curves]=false
               nr_curves+=1
          done
          protos_to_try=""
          "$pfs_tls13_offered" && protos_to_try="04"
          if "$ffdhe_offered" && "$EXPERIMENTAL"; then
               # Check to see whether RFC 7919 is supported (see Section 4 of RFC 7919)
               tls_sockets "03" "${ffdhe_cipher_list_hex:2}" "ephemeralkey" "00, 0a, 00, 04, 00, 02, 01, fb"
               sclient_success=$?
               if [[ $sclient_success -ne 0 ]] && [[ $sclient_success -ne 2 ]]; then
                    if "$pfs_tls13_offered"; then
                         protos_to_try="04 03"
                    else
                         protos_to_try="03"
                    fi
               fi
          fi
          for proto in $protos_to_try; do
               while true; do
                    curves_to_test=""
                    for (( i=0; i < nr_curves; i++ )); do
                         ! "${supported_curve[i]}" && curves_to_test+=", ${ffdhe_groups_hex[i]}"
                    done
                    [[ -z "$curves_to_test" ]] && break
                    len1=$(printf "%02x" "$((2*${#curves_to_test}/7))")
                    len2=$(printf "%02x" "$((2*${#curves_to_test}/7+2))")
                    tls_sockets "$proto" "${ffdhe_cipher_list_hex:2}" "ephemeralkey" "00, 0a, 00, $len2, 00, $len1, ${curves_to_test:2}"
                    sclient_success=$?
                    [[ $sclient_success -ne 0 ]] && [[ $sclient_success -ne 2 ]] && break
                    temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
                    curve_found="${temp#*, }"
                    curve_found="${curve_found%%,*}"
                    [[ ! "$curve_found" =~ ffdhe ]] && break
                    for (( i=0; i < nr_curves; i++ )); do
                         ! "${supported_curve[i]}" && [[ "${ffdhe_groups_output[i]}" == "$curve_found" ]] && break
                    done
                    [[ $i -eq $nr_curves ]] && break
                    supported_curve[i]=true
               done
               curves_offered=""
               for (( i=0; i < nr_curves; i++ )); do
                    "${supported_curve[i]}" && curves_offered+="${ffdhe_groups_output[i]} "
               done
               if [[ -n "$curves_offered" ]]; then
                    pr_bold " RFC 7919 DH groups offered: "
                    outln "$curves_offered"
                    fileout "rfc7919_groups" "INFO" "RFC 7919 DH groups offered $curves_offered"
               fi
          done
     fi
     outln

     tmpfile_handle $FUNCNAME.txt
     "$using_sockets" && HAS_DH_BITS="$has_dh_bits"

     # sub1_curves
     if "$pfs_offered"; then
          return 0
     else
          return 1
     fi
}
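
# Illustrative sketch only (demo function, never called; its name and the
# sample group IDs are made up): how the supported_groups / elliptic_curves
# extension used in the socket checks above is assembled. Every entry of the
# comma separated hex list (e.g. ", 00,17") is 7 characters long and encodes
# one 2-byte group ID, so 2*${#list}/7 is the byte length of the group list
# and adding 2 covers the 2-byte list-length field placed in front of it.
demo_supported_groups_extn() {
     local group_list=", 00,17, 00,18, 00,19"     # secp256r1, secp384r1, secp521r1
     local -i list_bytes=$((2*${#group_list}/7))  # 3 groups -> 6 bytes
     local len1 len2
     len1=$(printf "%02x" "$list_bytes")          # 06: length of the group list
     len2=$(printf "%02x" "$((list_bytes+2))")    # 08: length of the extension data
     # extension type 00,0a + extension length + list length + the groups themselves
     echo "00, 0a, 00, $len2, 00, $len1, ${group_list:2}"
}
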
# good source for configuration and bugs: https://wiki.mozilla.org/Security/Server_Side_TLS
# good start to read: http://en.wikipedia.org/wiki/Transport_Layer_Security#Attacks_against_TLS.2FSSL

spdy_pre() {
     if [[ -n "$STARTTLS" ]] || [[ "$SERVICE" != HTTP ]]; then
          [[ -n "$1" ]] && out "$1"
          out "(SPDY is an HTTP protocol and thus not tested here)"
          fileout "spdy_npn" "INFO" "SPDY/NPN : (SPDY is an HTTP protocol and thus not tested here)"
          return 1
     fi
     if [[ -n "$PROXY" ]]; then
          [[ -n "$1" ]] && pr_warning "$1"
          pr_warning "not tested as proxies do not support proxying it"
          fileout "spdy_npn" "WARN" "SPDY/NPN : not tested as proxies do not support proxying it"
          return 1
     fi
     if ! "$HAS_SPDY"; then
          pr_local_problem "$OPENSSL doesn't support SPDY/NPN";
          fileout "spdy_npn" "WARN" "SPDY/NPN : not tested $OPENSSL doesn't support SPDY/NPN"
          return 7
     fi
     return 0
}

http2_pre() {
     if [[ -n "$STARTTLS" ]] || [[ "$SERVICE" != HTTP ]]; then
          [[ -n "$1" ]] && out "$1"
          outln "(HTTP/2 is an HTTP protocol and thus not tested here)"
          fileout "https_alpn" "INFO" "HTTP2/ALPN : HTTP/2 is an HTTP protocol and thus not tested"
          return 1
     fi
     if [[ -n "$PROXY" ]]; then
          [[ -n "$1" ]] && pr_warning "$1"
          pr_warning "not tested as proxies do not support proxying it"
          fileout "https_alpn" "WARN" "HTTP2/ALPN : HTTP/2 was not tested as proxies do not support proxying it"
          return 1
     fi
     if ! "$HAS_ALPN" && "$SSL_NATIVE"; then
          prln_local_problem "$OPENSSL doesn't support HTTP2/ALPN";
          fileout "https_alpn" "WARN" "HTTP2/ALPN : HTTP/2 was not tested as $OPENSSL does not support it"
          return 7
     fi
     return 0
}

run_spdy() {
     local tmpstr
     local -i ret=0
pr_bold " SPDY/NPN "
2017-02-15 19:40:06 +01:00
if ! spdy_pre; then
2015-12-13 05:58:52 +01:00
outln
2015-09-17 15:30:15 +02:00
return 0
fi
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " -connect $NODEIP : $PORT $BUGS $SNI -nextprotoneg " $NPN_PROTOs "" ) </dev/null 2>$ERRFILE >$TMPFILE
2015-09-17 15:30:15 +02:00
tmpstr = $( grep -a '^Protocols' $TMPFILE | sed 's/Protocols.*: //' )
if [ [ -z " $tmpstr " ] ] || [ [ " $tmpstr " = = " " ] ] ; then
2015-10-15 14:15:07 +02:00
outln "not offered"
2016-01-23 23:33:17 +01:00
fileout "spdy_npn" "INFO" "SPDY/NPN : not offered"
2015-09-17 15:30:15 +02:00
ret = 1
else
# now comes a strange thing: "Protocols advertised by server:" is empty but connection succeeded
2017-03-18 21:01:55 +01:00
if egrep -aq "h2|spdy|http" <<< $tmpstr ; then
2016-01-23 19:18:33 +01:00
out " $tmpstr "
2015-11-03 10:30:59 +01:00
outln " (advertised)"
2016-01-23 23:33:17 +01:00
fileout "spdy_npn" "INFO" " SPDY/NPN : $tmpstr (advertised) "
2015-09-17 15:30:15 +02:00
ret = 0
else
2017-02-25 16:31:30 +01:00
prln_cyan "please check manually, server response was ambiguous ..."
2016-04-21 18:04:33 +02:00
fileout "spdy_npn" "INFO" "SPDY/NPN : please check manually, server response was ambiguous ..."
2015-09-17 15:30:15 +02:00
ret = 10
fi
fi
2015-12-13 01:20:57 +01:00
#outln
2015-09-17 15:30:15 +02:00
# btw: nmap can do that too http://nmap.org/nsedoc/scripts/tls-nextprotoneg.html
# nmap --script=tls-nextprotoneg #NODE -p $PORT is your friend if your openssl doesn't want to test this
tmpfile_handle $FUNCNAME .txt
return $ret
2015-05-17 22:43:53 +02:00
}

run_http2() {
     local tmpstr alpn_extn len
     local -i ret=0
     local had_alpn_proto=false
     local alpn_finding=""
pr_bold " HTTP2/ALPN "
2017-02-15 19:40:06 +01:00
if ! http2_pre; then
2015-12-13 05:58:52 +01:00
outln
2015-12-13 01:20:57 +01:00
return 0
fi
for proto in $ALPN_PROTOs ; do
# for some reason OpenSSL doesn't list the advertised protocols, so instead try common protocols
2016-12-23 17:02:31 +01:00
if " $HAS_ALPN " ; then
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " -connect $NODEIP : $PORT $BUGS $SNI -alpn $proto " ) </dev/null 2>$ERRFILE >$TMPFILE
2016-12-23 17:02:31 +01:00
else
alpn_extn = " $( printf "%02x" ${# proto } ) , $( string_to_asciihex " $proto " ) "
len = " $( printf "%04x" $(( ${# proto } + 1 )) ) "
alpn_extn = " ${ len : 0 : 2 } , ${ len : 2 : 2 } , $alpn_extn "
len = " $( printf "%04x" $(( ${# proto } + 3 )) ) "
alpn_extn = " 00,10, ${ len : 0 : 2 } , ${ len : 2 : 2 } , $alpn_extn "
tls_sockets "03" " $TLS12_CIPHER " "all" " $alpn_extn "
if [ [ -r " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " ] ] ; then
cp " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " $TMPFILE
else
echo "" > $TMPFILE
fi
fi
2015-12-24 23:00:23 +01:00
#tmpstr=$(grep -a '^ALPN protocol' $TMPFILE | sed 's/ALPN protocol.*: //')
#tmpstr=$(awk '/^ALPN protocol*:/ { print $2 }' $TMPFILE)
tmpstr = $( awk -F':' '/^ALPN protocol*:/ { print $2 }' $TMPFILE )
if [ [ " $tmpstr " = = *" $proto " ] ] ; then
if ! $had_alpn_proto ; then
2015-12-13 01:20:57 +01:00
out " $proto "
2016-01-23 19:18:33 +01:00
alpn_finding += " $proto "
2015-12-24 23:00:23 +01:00
had_alpn_proto = true
2015-12-13 01:20:57 +01:00
else
out " , $proto "
2016-01-23 19:18:33 +01:00
alpn_finding += " , $proto "
2015-12-13 01:20:57 +01:00
fi
fi
done
2015-12-24 23:00:23 +01:00
if $had_alpn_proto ; then
2015-12-13 01:20:57 +01:00
outln " (offered)"
2016-02-16 09:54:01 +01:00
fileout "https_alpn" "INFO" " HTTP2/ALPN : offered; Protocols: $alpn_finding "
2015-12-13 01:20:57 +01:00
ret = 0
else
outln "not offered"
2016-01-23 23:33:17 +01:00
fileout "https_alpn" "INFO" "HTTP2/ALPN : not offered"
2015-12-13 01:20:57 +01:00
ret = 1
fi
tmpfile_handle $FUNCNAME .txt
return $ret
}
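
# Illustrative sketch only (demo function, not wired in anywhere; its name is
# made up): the ALPN extension bytes that the socket fallback above builds for
# a single protocol name. For "h2" (0x68 0x32) the result is, byte-wise:
#   00,10 (type)  00,05 (extension length)  00,03 (list length)  02 (name length)  68,32 ("h2")
demo_alpn_extn() {
     local proto="$1" len alpn_extn
     # length-prefixed protocol name
     alpn_extn="$(printf "%02x" ${#proto}),$(string_to_asciihex "$proto")"
     # prepend the 2-byte ALPN protocol-list length (= name length + 1)
     len="$(printf "%04x" $((${#proto}+1)))"
     alpn_extn="${len:0:2},${len:2:2},$alpn_extn"
     # prepend extension type 00,10 and the 2-byte extension length (= name length + 3)
     len="$(printf "%04x" $((${#proto}+3)))"
     echo "00,10,${len:0:2},${len:2:2},$alpn_extn"
}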

# arg1: string to send
# arg2: possible success strings, an egrep pattern, needed!
starttls_line() {
     debugme echo -e "\n=== sending \"$1\" ..."
     echo -e "$1" >&5

     # we don't know how much to read and it's blocking! So we just put a cat into the
     # background and read until $STARTTLS_SLEEP and: cross our fingers
     cat <&5 >$TMPFILE &
     wait_kill $! $STARTTLS_SLEEP
     debugme echo "... received result: "
     debugme cat $TMPFILE
     if [[ -n "$2" ]]; then
          if egrep -q "$2" $TMPFILE; then
               debugme echo " ---> reply matched \"$2\""
          else
               # slow down for exim and friends who need a proper handshake, see
               # https://github.com/drwetter/testssl.sh/issues/218
               FAST_STARTTLS=false
               debugme echo -e "\n=== sending with automated FAST_STARTTLS=false \"$1\" ..."
               echo -e "$1" >&5
               cat <&5 >$TMPFILE &
               debugme echo "... received result: "
               debugme cat $TMPFILE
               if [[ -n "$2" ]]; then
                    debugme echo " ---> reply with automated FAST_STARTTLS=false matched \"$2\""
               else
                    debugme echo " ---> reply didn't match \"$2\", see $TMPFILE"
                    pr_warning "STARTTLS handshake problem. "
                    outln "Either switch to native openssl (--ssl-native), "
                    outln " give the server more time to reply (STARTTLS_SLEEP=<seconds> ./testssl.sh ..) -- "
                    outln " or debug what happened (add --debug=2)"
                    return 3
               fi
          fi
     fi
     return 0
}
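
# Illustrative sketch of the timed-read pattern used in starttls_line() above
# (demo function only, not called anywhere; the name is made up): since we
# never know how many bytes the server will send and the fd is blocking, the
# read is a background "cat" on fd 5 that wait_kill() reaps after at most
# $STARTTLS_SLEEP seconds.
demo_timed_read() {
     local outfile="$1"
     cat <&5 >"$outfile" &               # read whatever arrives on the socket fd
     wait_kill $! $STARTTLS_SLEEP        # wait for the background reader, kill it on timeout
}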

# Line based send with newline characters appended
starttls_just_send() {
     debugme echo -e "C: $1"
     echo -ne "$1\r\n" >&5
}

# Stream based send
starttls_just_send2() {
     debugme echo -e "C: $1"
     echo -ne "$1" >&5
}

starttls_just_read() {
     debugme echo "=== just read banner ==="
     if [[ "$DEBUG" -ge 2 ]]; then
          cat <&5 &
          wait_kill $! $STARTTLS_SLEEP
     else
          dd of=/dev/null count=8 <&5 2>/dev/null &
          wait_kill $! $STARTTLS_SLEEP
     fi
     return 0
}

starttls_full_read() {
     starttls_read_data=()
     local one_line=""
     local ret=0
     local cont_pattern="$1"
     local end_pattern="$2"
     local ret_found=0
     if [[ $# -ge 3 ]]; then
          debugme echo "=== we have to search for $3 pattern ==="
          ret_found=3
     fi
     debugme echo "=== full read banner ==="
     local oldIFS="$IFS"
     IFS=''
     while read -r -t $STARTTLS_SLEEP one_line; ret=$?; (exit $ret); do
          debugme echo "S: ${one_line}"
          if [[ $# -ge 3 ]]; then
               if [[ ${one_line} =~ $3 ]]; then
                    ret_found=0
                    debugme echo "^^^^^^^ that's what we were looking for ==="
               fi
          fi
          starttls_read_data+=("${one_line}")
          if [[ ${one_line} =~ ${end_pattern} ]]; then
               debugme echo "=== full read finished ==="
               IFS="${oldIFS}"
               return ${ret_found}
          fi
          if [[ ! ${one_line} =~ ${cont_pattern} ]]; then
               debugme echo "=== full read syntax error, expected regex pattern ${cont_pattern} (cont) or ${end_pattern} (end) ==="
               IFS="${oldIFS}"
               return 2
          fi
     done <&5
     debugme echo "=== full read error/timeout ==="
     IFS="${oldIFS}"
     return $ret
}
starttls_ftp_dialog() {
     debugme echo "=== starting ftp STARTTLS dialog ==="
     local reAUTHTLS='^ AUTH TLS'
     starttls_full_read '^220-' '^220 ' && debugme echo "received server greeting" &&
     starttls_just_send 'FEAT' && debugme echo "sent FEAT" &&
     starttls_full_read '^(211-| )' '^211 ' "${reAUTHTLS}" && debugme echo "received server features and checked STARTTLS availability" &&
     starttls_just_send 'AUTH TLS' && debugme echo "initiated STARTTLS" &&
     starttls_full_read '^234-' '^234 ' && debugme echo "received ack for STARTTLS"
     local ret=$?
     debugme echo "=== finished ftp STARTTLS dialog with ${ret} ==="
     return $ret
}
starttls_smtp_dialog() {
     debugme echo "=== starting smtp STARTTLS dialog ==="
     local re250STARTTLS='^250[ -]STARTTLS'
     starttls_full_read '^220-' '^220 ' && debugme echo "received server greeting" &&
     starttls_just_send 'EHLO testssl.sh' && debugme echo "sent EHLO" &&
     starttls_full_read '^250-' '^250 ' "${re250STARTTLS}" && debugme echo "received server capabilities and checked STARTTLS availability" &&
     starttls_just_send 'STARTTLS' && debugme echo "initiated STARTTLS" &&
     starttls_full_read '^220-' '^220 ' && debugme echo "received ack for STARTTLS"
     local ret=$?
     debugme echo "=== finished smtp STARTTLS dialog with ${ret} ==="
     return $ret
}
starttls_pop3_dialog() {
     debugme echo "=== starting pop3 STARTTLS dialog ==="
     starttls_full_read '$^' '^+OK' && debugme echo "received server greeting" &&
     starttls_just_send 'STLS' && debugme echo "initiated STARTTLS" &&
     starttls_full_read '$^' '^+OK' && debugme echo "received ack for STARTTLS"
     local ret=$?
     debugme echo "=== finished pop3 STARTTLS dialog with ${ret} ==="
     return $ret
}
starttls_imap_dialog() {
     debugme echo "=== starting imap STARTTLS dialog ==="
     local reSTARTTLS='^\* CAPABILITY(( .*)? IMAP4rev1( .*)? STARTTLS( .*)?|( .*)? STARTTLS( .*)? IMAP4rev1( .*)?)$'
     starttls_full_read '^\* ' '^\* OK ' && debugme echo "received server greeting" &&
     starttls_just_send 'a001 CAPABILITY' && debugme echo "sent CAPABILITY" &&
     starttls_full_read '^\* ' '^a001 OK ' "${reSTARTTLS}" && debugme echo "received server capabilities and checked STARTTLS availability" &&
     starttls_just_send 'a002 STARTTLS' && debugme echo "initiated STARTTLS" &&
     starttls_full_read '^\* ' '^a002 OK ' && debugme echo "received ack for STARTTLS"
     local ret=$?
     debugme echo "=== finished imap STARTTLS dialog with ${ret} ==="
     return $ret
}
starttls_nntp_dialog() {
     debugme echo "=== starting nntp STARTTLS dialog ==="
     starttls_full_read '$^' '^20[01] ' && debugme echo "received server greeting" &&
     starttls_just_send 'CAPABILITIES' && debugme echo "sent CAPABILITIES" &&
     starttls_full_read '$^' '^101 ' &&
     starttls_full_read '' '^\.$' "^STARTTLS$" && debugme echo "received server capabilities and checked STARTTLS availability" &&
     starttls_just_send 'STARTTLS' && debugme echo "initiated STARTTLS" &&
     starttls_full_read '$^' '^382 ' && debugme echo "received ack for STARTTLS"
     local ret=$?
     debugme echo "=== finished nntp STARTTLS dialog with ${ret} ==="
     return $ret
}

starttls_postgres_dialog() {
     debugme echo "=== starting postgres STARTTLS dialog ==="
     local init_tls="\x00\x00\x00\x08\x04\xD2\x16\x2F"
     starttls_just_send "${init_tls}" && debugme echo "initiated STARTTLS" &&
     starttls_full_read '' '' 'S' && debugme echo "received ack for STARTTLS"
     local ret=$?
     debugme echo "=== finished postgres STARTTLS dialog with ${ret} ==="
     return $ret
}

starttls_mysql_dialog() {
     debugme echo "=== starting mysql STARTTLS dialog ==="
     local login_request="
     , 20, 00, 00, 01,                  # payload_length, sequence_id
     85, ae, ff, 00,                    # capability flags, CLIENT_SSL always set
     00, 00, 00, 01,                    # max-packet size
     21,                                # character set
     00, 00, 00, 00, 00, 00, 00, 00,    # string[23] reserved (all [0])
     00, 00, 00, 00, 00, 00, 00, 00,
     00, 00, 00, 00, 00, 00, 00"
     code2network "${login_request}"
     starttls_just_read && debugme echo -e "\nreceived server greeting" &&
     starttls_just_send2 "$NW_STR" && debugme echo "initiated STARTTLS"
     # TODO: We could detect if the server supports STARTTLS via the "Server Capabilities"
     # bit field, but we'd need to parse the binary stream, with greater precision than regex.
     local ret=$?
     debugme echo "=== finished mysql STARTTLS dialog with ${ret} ==="
     return $ret
}

# arg for a fd doesn't work here
fd_socket() {
     local jabber=""
     local proyxline=""
     local nodeip="$(tr -d '[]' <<< $NODEIP)"     # sockets do not need the square brackets we have of IPv6 addresses
                                                  # we just need do it here, that's all!
     if [[ -n "$PROXY" ]]; then
          if ! exec 5<> /dev/tcp/${PROXYIP}/${PROXYPORT}; then
               outln
               pr_warning "$PROG_NAME: unable to open a socket to proxy $PROXYIP:$PROXYPORT"
               return 6
          fi
          if "$DNS_VIA_PROXY"; then
               echo -e "CONNECT $NODE:$PORT HTTP/1.0\n" >&5
          else
               echo -e "CONNECT $nodeip:$PORT HTTP/1.0\n" >&5
          fi
          while true; do
               read proyxline <&5
               if [[ "${proyxline%/*}" == "HTTP" ]]; then
                    proyxline=${proyxline#* }
                    if [[ "${proyxline%% *}" != "200" ]]; then
                         pr_warning "Unable to CONNECT via proxy. "
                         [[ "$PORT" != 443 ]] && prln_magenta "Check whether your proxy supports port $PORT and the underlying protocol."
                         return 6
                    fi
               fi
               if [[ "$proyxline" == $'\r' ]]; then
                    break
               fi
          done
     elif ! exec 5<>/dev/tcp/$nodeip/$PORT; then  #  2>/dev/null would remove an error message, but disables debugging
          outln
          pr_warning "Unable to open a socket to $NODEIP:$PORT. "
          # It can last ~2 minutes but for those rare occasions we don't do a timeout handler here, KISS
          return 6
     fi

     if [[ -n "$STARTTLS" ]]; then
          case "$STARTTLS_PROTOCOL" in # port
               ftp|ftps)  # https://tools.ietf.org/html/rfc4217, https://tools.ietf.org/html/rfc959
                    starttls_ftp_dialog
                    ;;
               smtp|smtps)  # SMTP, see https://tools.ietf.org/html/rfc5321, https://tools.ietf.org/html/rfc3207
                    starttls_smtp_dialog
                    ;;
               pop3|pop3s)  # POP, see https://tools.ietf.org/html/rfc2595
                    starttls_pop3_dialog
                    ;;
               nntp|nntps)  # NNTP, see https://tools.ietf.org/html/rfc4642
                    starttls_nntp_dialog
                    ;;
               imap|imaps)  # IMAP, https://tools.ietf.org/html/rfc2595, https://tools.ietf.org/html/rfc3501
                    starttls_imap_dialog
                    ;;
               ldap|ldaps)  # LDAP, https://tools.ietf.org/html/rfc2830, https://tools.ietf.org/html/rfc4511
                    fatal "FIXME: LDAP+STARTTLS over sockets not yet supported (try \"--ssl-native\")" -4
                    ;;
               acap|acaps)  # ACAP = Application Configuration Access Protocol, see https://tools.ietf.org/html/rfc2595
                    fatal "ACAP Easteregg: not implemented -- probably never will" -4
                    ;;
               xmpp|xmpps)  # XMPP, see https://tools.ietf.org/html/rfc6120
                    starttls_just_read
                    [[ -z $XMPP_HOST ]] && XMPP_HOST="$NODE"
                    jabber=$(cat <<EOF
<?xml version='1.0' ?>
<stream:stream
xmlns:stream='http://etherx.jabber.org/streams'
xmlns='jabber:client'
to='$XMPP_HOST'
xml:lang='en'
version='1.0'>
EOF
)
                    starttls_line "$jabber"
                    starttls_line "<starttls xmlns='urn:ietf:params:xml:ns:xmpp-tls'/>" "proceed"
                    # BTW: https://xmpp.net !
                    ;;
               postgres)  # Postgres SQL, see http://www.postgresql.org/docs/devel/static/protocol-message-formats.html
                    starttls_postgres_dialog
                    ;;
               mysql)  # MySQL, see https://dev.mysql.com/doc/internals/en/x-protocol-lifecycle-lifecycle.html#x-protocol-lifecycle-tls-extension
                    starttls_mysql_dialog
                    ;;
               *)   # we need to throw an error here -- otherwise testssl.sh treats the STARTTLS protocol as plain SSL/TLS which leads to FP
                    fatal "FIXME: STARTTLS protocol $STARTTLS_PROTOCOL is not yet supported" -4
          esac
     fi

     return 0
}

close_socket() {
     exec 5<&-
     exec 5>&-
     return 0
}

# first: helper function for protocol checks
# arg1: formatted string here in the code
code2network() {
     NW_STR=$(sed -e 's/,/\\\x/g' <<< "$1" | sed -e 's/# .*$//g' -e 's/ //g' -e '/^$/d' | tr -d '\n' | tr -d '\t')
}
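
# Illustrative example (demo function only, never called; its name is made
# up): code2network() turns the comma separated, comment-friendly hex notation
# used throughout this file into a printf-able escape string. The leading
# comma makes sure every byte gets its \x prefix, as in the MySQL
# login_request above.
demo_code2network() {
     code2network ", 16, 03, 01     # TLS record header (handshake / version 3.1)"
     # NW_STR is now "\x16\x03\x01"
     printf -- "$NW_STR" | hexdump -C
}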

len2twobytes() {
     local len_arg1=${#1}
     [[ $len_arg1 -le 2 ]] && LEN_STR=$(printf "00, %02s \n" "$1")
     [[ $len_arg1 -eq 3 ]] && LEN_STR=$(printf "%02s, %02s \n" "${1:0:1}" "${1:1:2}")
     [[ $len_arg1 -eq 4 ]] && LEN_STR=$(printf "%02s, %02s \n" "${1:0:2}" "${1:2:2}")
}

socksend_sslv2_clienthello() {
     local data=""

     code2network "$1"
     data="$NW_STR"
     [[ "$DEBUG" -ge 4 ]] && echo "\"$data\""
     printf -- "$data" >&5 2>/dev/null &
     sleep $USLEEP_SND
}

# for SSLv2 to TLS 1.2:
sockread_serverhello() {
     [[ -z "$2" ]] && maxsleep=$MAX_WAITSOCK || maxsleep=$2
     SOCK_REPLY_FILE=$(mktemp $TEMPDIR/ddreply.XXXXXX) || return 7
     dd bs=$1 of=$SOCK_REPLY_FILE count=1 <&5 2>/dev/null &
     wait_kill $! $maxsleep
     return $?
}

#trying a faster version
sockread_fast() {
     dd bs=$1 count=1 <&5 2>/dev/null | hexdump -v -e '16/1 "%02X"'
}
get_pub_key_size( ) {
local pubkey pubkeybits
local -i i len1 len
local tmppubkeyfile
# OpenSSL displays the number of bits for RSA and ECC
2017-09-22 20:06:51 +02:00
pubkeybits = $( $OPENSSL x509 -noout -pubkey -in $HOSTCERT 2>>$ERRFILE | $OPENSSL pkey -pubin -text 2>>$ERRFILE | grep -aw "Public-Key:" | sed -e 's/.*(//' -e 's/)//' )
2016-09-28 23:15:37 +02:00
if [ [ -n $pubkeybits ] ] ; then
echo " Server public key is $pubkeybits " >> $TMPFILE
else
# This extracts the public key for DSA, DH, and GOST
tmppubkeyfile = $( mktemp $TEMPDIR /pubkey.XXXXXX) || return 7
2017-09-22 20:06:51 +02:00
$OPENSSL x509 -noout -pubkey -in $HOSTCERT 2>>$ERRFILE | $OPENSSL pkey -pubin -outform DER -out " $tmppubkeyfile " 2>>$ERRFILE
2016-09-28 23:15:37 +02:00
pubkey = $( hexdump -v -e '16/1 "%02X"' " $tmppubkeyfile " )
rm $tmppubkeyfile
2016-11-15 17:32:30 +01:00
[ [ -z " $pubkey " ] ] && return 1
2016-09-28 23:15:37 +02:00
# Skip over tag and length of subjectPublicKeyInfo
i = 2
len1 = " 0x ${ pubkey : i : 2 } "
if [ [ $len1 -lt 0x80 ] ] ; then
i = $i +2
else
len1 = $len1 -0x80
i = $i +2*$len1 +2
fi
# Skip over algorithm field
i = $i +2
len1 = " 0x ${ pubkey : i : 2 } "
i = $i +2
if [ [ $len1 -lt 0x80 ] ] ; then
i = $i +2*$len1
else
case $len1 in
129) len = " 0x ${ pubkey : i : 2 } " ; ;
130) len = " 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
; ;
131) len = " 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
; ;
132) len = " 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
; ;
esac
i = $i +2+2*$len
fi
# Next is the public key BIT STRING. Skip over tag, length, and number of unused bits.
i = $i +2
len1 = " 0x ${ pubkey : i : 2 } "
if [ [ $len1 -lt 0x80 ] ] ; then
i = $i +4
else
len1 = $len1 -0x80
i = $i +2*$len1 +4
fi
# Now get the length of the public key
i = $i +2
len1 = " 0x ${ pubkey : i : 2 } "
i = $i +2
if [ [ $len1 -lt 0x80 ] ] ; then
len = $len1
else
case $len1 in
129) len = " 0x ${ pubkey : i : 2 } " ; ;
130) len = " 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
; ;
131) len = " 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
i = $i +2
len = 256*$len +" 0x ${ pubkey : i : 2 } "
; ;
                    132) len="0x${pubkey:i:2}"
                         i=$i+2
                         len=256*$len+"0x${pubkey:i:2}"
                         i=$i+2
                         len=256*$len+"0x${pubkey:i:2}"
                         i=$i+2
                         len=256*$len+"0x${pubkey:i:2}"
                         ;;
esac
fi
len = 8*$len # convert from bytes to bits
pubkeybits = " $( printf "%d" $len ) "
echo " Server public key is $pubkeybits bit " >> $TMPFILE
fi
2016-11-15 17:32:30 +01:00
return 0
2016-09-28 23:15:37 +02:00
}
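
# Illustrative sketch of the DER length decoding that get_pub_key_size() does
# inline several times (demo function only, not used anywhere; the name is
# made up): a leading length octet below 0x80 is the length itself (short
# form); 0x81..0x84 announce that the next 1..4 octets carry the length (long
# form). arg1 is the ASCII-hex string, arg2 the offset (in hex chars) of the
# length octet; the decoded length in bytes is echoed.
demo_der_len() {
     local hexstr="$1"
     local -i i="$2" j len=0 len1
     len1="0x${hexstr:i:2}"
     if [[ $len1 -lt 0x80 ]]; then
          len=$len1                                # short form
     else
          for (( j=0; j < len1-128; j++ )); do     # long form: 0x80+n => n octets follow
               i+=2
               len=$(( 256*len + 0x${hexstr:i:2} ))
          done
     fi
     echo "$len"
}
# e.g.: demo_der_len "3082010a0282010100" 2 echoes 266 (0x82 -> the two octets 01 0a)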
# Extract the DH ephemeral key from the ServerKeyExchange message
get_dh_ephemeralkey( ) {
local tls_serverkeyexchange_ascii = " $1 "
local -i tls_serverkeyexchange_ascii_len offset
local dh_p dh_g dh_y dh_param len1 key_bitstring tmp_der_key_file
local -i i dh_p_len dh_g_len dh_y_len dh_param_len
tls_serverkeyexchange_ascii_len = ${# tls_serverkeyexchange_ascii }
dh_p_len = 2*$( hex2dec " ${ tls_serverkeyexchange_ascii : 0 : 4 } " )
offset = 4+$dh_p_len
if [ [ $tls_serverkeyexchange_ascii_len -lt $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
# Subtract any leading 0 bytes
for ( ( i = 4; i < offset; i = i+2 ) ) ; do
[ [ " ${ tls_serverkeyexchange_ascii : i : 2 } " != "00" ] ] && break
dh_p_len = $dh_p_len -2
done
if [ [ $i -ge $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
dh_p = " ${ tls_serverkeyexchange_ascii : i : dh_p_len } "
dh_g_len = 2*$( hex2dec " ${ tls_serverkeyexchange_ascii : offset : 4 } " )
i = 4+$offset
offset += 4+$dh_g_len
if [ [ $tls_serverkeyexchange_ascii_len -lt $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
# Subtract any leading 0 bytes
for ( ( 1; i < offset; i = i+2 ) ) ; do
[ [ " ${ tls_serverkeyexchange_ascii : i : 2 } " != "00" ] ] && break
dh_g_len = $dh_g_len -2
done
if [ [ $i -ge $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
dh_g = " ${ tls_serverkeyexchange_ascii : i : dh_g_len } "
dh_y_len = 2*$( hex2dec " ${ tls_serverkeyexchange_ascii : offset : 4 } " )
i = 4+$offset
offset += 4+$dh_y_len
if [ [ $tls_serverkeyexchange_ascii_len -lt $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
# Subtract any leading 0 bytes
for ( ( 1; i < offset; i = i+2 ) ) ; do
[ [ " ${ tls_serverkeyexchange_ascii : i : 2 } " != "00" ] ] && break
dh_y_len = $dh_y_len -2
done
if [ [ $i -ge $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
dh_y = " ${ tls_serverkeyexchange_ascii : i : dh_y_len } "
# The following code assumes that all lengths can be encoded using at most 2 bytes,
# which just means that the encoded length of the public key must be less than
# 65,536 bytes. If the length is anywhere close to that, it is almost certainly an
# encoding error.
if [ [ $dh_p_len +$dh_g_len +$dh_y_len -ge 131000 ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
return 1
fi
# make ASN.1 INTEGER of p, g, and Y
[ [ " 0x ${ dh_p : 0 : 1 } " -ge 8 ] ] && dh_p_len += 2 && dh_p = " 00 $dh_p "
if [ [ $dh_p_len -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( dh_p_len/2)) ) "
elif [ [ $dh_p_len -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( dh_p_len/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( dh_p_len/2)) ) "
fi
dh_p = " 02 ${ len1 } $dh_p "
[ [ " 0x ${ dh_g : 0 : 1 } " -ge 8 ] ] && dh_g_len += 2 && dh_g = " 00 $dh_g "
if [ [ $dh_g_len -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( dh_g_len/2)) ) "
elif [ [ $dh_g_len -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( dh_g_len/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( dh_g_len/2)) ) "
fi
dh_g = " 02 ${ len1 } $dh_g "
[ [ " 0x ${ dh_y : 0 : 1 } " -ge 8 ] ] && dh_y_len += 2 && dh_y = " 00 $dh_y "
if [ [ $dh_y_len -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( dh_y_len/2)) ) "
elif [ [ $dh_y_len -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( dh_y_len/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( dh_y_len/2)) ) "
fi
dh_y = " 02 ${ len1 } $dh_y "
# Make a SEQUENCE of p and g
dh_param_len = ${# dh_p } +${# dh_g }
if [ [ $dh_param_len -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( dh_param_len/2)) ) "
elif [ [ $dh_param_len -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( dh_param_len/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( dh_param_len/2)) ) "
fi
dh_param = " 30 ${ len1 } ${ dh_p } ${ dh_g } "
     # Make a SEQUENCE of the parameters SEQUENCE and the OID
dh_param_len = 22+${# dh_param }
if [ [ $dh_param_len -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( dh_param_len/2)) ) "
elif [ [ $dh_param_len -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( dh_param_len/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( dh_param_len/2)) ) "
fi
dh_param = " 30 ${ len1 } 06092A864886F70D010301 ${ dh_param } "
# Encapsulate public key, y, in a BIT STRING
dh_y_len = ${# dh_y } +2
if [ [ $dh_y_len -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( dh_y_len/2)) ) "
elif [ [ $dh_y_len -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( dh_y_len/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( dh_y_len/2)) ) "
fi
dh_y = " 03 ${ len1 } 00 $dh_y "
# Create the public key SEQUENCE
i = ${# dh_param } +${# dh_y }
if [ [ $i -lt 256 ] ] ; then
len1 = " $( printf "%02x" $(( i/2)) ) "
elif [ [ $i -lt 512 ] ] ; then
len1 = " 81 $( printf "%02x" $(( i/2)) ) "
else
len1 = " 82 $( printf "%04x" $(( i/2)) ) "
fi
key_bitstring = " 30 ${ len1 } ${ dh_param } ${ dh_y } "
tmp_der_key_file = $( mktemp $TEMPDIR /pub_key_der.XXXXXX) || return 1
asciihex_to_binary_file " $key_bitstring " " $tmp_der_key_file "
key_bitstring = " $( $OPENSSL pkey -pubin -in $tmp_der_key_file -inform DER 2> $ERRFILE ) "
rm $tmp_der_key_file
[ [ -z " $key_bitstring " ] ] && return 1
2017-02-25 16:31:30 +01:00
tm_out " $key_bitstring "
2016-12-29 22:31:42 +01:00
return 0
}
2015-05-17 22:43:53 +02:00
# arg1: name of file with socket reply
2016-09-28 23:15:37 +02:00
# arg2: true if entire server hello should be parsed
2015-06-23 12:58:40 +02:00
parse_sslv2_serverhello( ) {
2016-09-28 23:15:37 +02:00
local ret v2_hello_ascii v2_hello_initbyte v2_hello_length
local v2_hello_handshake v2_cert_type v2_hello_cert_length
local v2_hello_cipherspec_length tmp_der_certfile
local -i certificate_len nr_ciphers_detected offset i
2015-09-17 15:30:15 +02:00
# server hello: in hex representation, see below
# byte 1+2: length of server hello 0123
# 3: 04=Handshake message, server hello 45
# 4: session id hit or not (boolean: 00=false, this 67
# is the normal case)
# 5: certificate type, 01 = x509 89
# 6+7 version (00 02 = SSLv2) 10-13
# 8+9 certificate length 14-17
# 10+11 cipher spec length 17-20
# 12+13 connection id length
# [certificate length] ==> certificate
# [cipher spec length] ==> ciphers GOOD: HERE ARE ALL CIPHERS ALREADY!
local ret = 3
2016-12-13 12:38:20 +01:00
local parse_complete = "false"
2016-09-28 23:15:37 +02:00
if [ [ " $2 " = = "true" ] ] ; then
2016-12-13 12:38:20 +01:00
parse_complete = true
2016-09-28 23:15:37 +02:00
fi
2016-12-13 12:38:20 +01:00
" $parse_complete " && echo "======================================" > $TMPFILE
2015-09-17 15:30:15 +02:00
v2_hello_ascii = $( hexdump -v -e '16/1 "%02X"' $1 )
2017-01-04 16:47:36 +01:00
v2_hello_ascii = " ${ v2_hello_ascii %%[!0-9A-F]* } "
2015-09-17 15:30:15 +02:00
[ [ " $DEBUG " -ge 5 ] ] && echo " $v2_hello_ascii "
if [ [ -z " $v2_hello_ascii " ] ] ; then
2016-12-13 12:38:20 +01:00
ret = 0 # 1 line without any blanks: no server hello received
2015-09-17 15:30:15 +02:00
debugme echo "server hello empty"
else
# now scrape two bytes out of the reply per byte
v2_hello_initbyte = " ${ v2_hello_ascii : 0 : 1 } " # normally this belongs to the next, should be 8!
v2_hello_length = " ${ v2_hello_ascii : 1 : 3 } " # + 0x8000 see above
v2_hello_handshake = " ${ v2_hello_ascii : 4 : 2 } "
2016-09-28 23:15:37 +02:00
v2_cert_type = " ${ v2_hello_ascii : 8 : 2 } "
2015-09-17 15:30:15 +02:00
v2_hello_cert_length = " ${ v2_hello_ascii : 14 : 4 } "
v2_hello_cipherspec_length = " ${ v2_hello_ascii : 18 : 4 } "
V2_HELLO_CIPHERSPEC_LENGTH = $( printf "%d\n" " 0x $v2_hello_cipherspec_length " 2>/dev/null)
[ [ $? -ne 0 ] ] && ret = 7
if [ [ $v2_hello_initbyte != "8" ] ] || [ [ $v2_hello_handshake != "04" ] ] ; then
ret = 1
if [ [ $DEBUG -ge 2 ] ] ; then
echo "no correct server hello"
echo " SSLv2 server init byte: 0x0 $v2_hello_initbyte "
echo " SSLv2 hello handshake : 0x $v2_hello_handshake "
fi
fi
if [ [ $DEBUG -ge 3 ] ] ; then
echo " SSLv2 server hello length: 0x0 $v2_hello_length "
2016-09-28 23:15:37 +02:00
echo " SSLv2 certificate type: 0x $v2_cert_type "
2015-09-17 15:30:15 +02:00
echo " SSLv2 certificate length: 0x $v2_hello_cert_length "
echo " SSLv2 cipher spec length: 0x $v2_hello_cipherspec_length "
fi
2017-01-04 16:47:36 +01:00
if " $parse_complete " && [ [ 2*$( hex2dec " $v2_hello_length " ) -ne ${# v2_hello_ascii } -4 ] ] ; then
ret = 7
fi
2015-09-17 15:30:15 +02:00
fi
2016-10-28 15:30:07 +02:00
2016-12-13 12:38:20 +01:00
" $parse_complete " || return $ret
2016-09-28 23:15:37 +02:00
2016-12-12 15:38:20 +01:00
rm -f $HOSTCERT $TEMPDIR /intermediatecerts.pem
if [ [ $ret -eq 3 ] ] ; then
2016-12-06 17:23:01 +01:00
certificate_len = 2*$( hex2dec " $v2_hello_cert_length " )
2017-01-05 20:20:19 +01:00
2016-12-06 17:23:01 +01:00
if [ [ " $v2_cert_type " = = "01" ] ] && [ [ " $v2_hello_cert_length " != "00" ] ] ; then
tmp_der_certfile = $( mktemp $TEMPDIR /der_cert.XXXXXX) || return $ret
asciihex_to_binary_file " ${ v2_hello_ascii : 26 : certificate_len } " " $tmp_der_certfile "
2017-01-04 16:47:36 +01:00
$OPENSSL x509 -inform DER -in $tmp_der_certfile -outform PEM -out $HOSTCERT 2>$ERRFILE
if [ [ $? -ne 0 ] ] ; then
debugme echo "Malformed certificate in ServerHello."
return 1
fi
2016-12-06 17:23:01 +01:00
rm $tmp_der_certfile
get_pub_key_size
echo "======================================" >> $TMPFILE
fi
2016-09-28 23:15:37 +02:00
2016-12-06 17:23:01 +01:00
# Output list of supported ciphers
2016-09-28 23:15:37 +02:00
let offset = 26+$certificate_len
nr_ciphers_detected = $(( V2_HELLO_CIPHERSPEC_LENGTH / 3 ))
for ( ( i = 0 ; i<nr_ciphers_detected; i++ ) ) ; do
2017-03-25 13:23:21 +01:00
echo " Supported cipher: x $( tolower " ${ v2_hello_ascii : offset : 6 } " ) " >> $TMPFILE
2016-09-28 23:15:37 +02:00
let offset = $offset +6
done
echo "======================================" >> $TMPFILE
2016-10-28 15:30:07 +02:00
2016-09-28 23:15:37 +02:00
tmpfile_handle $FUNCNAME .txt
fi
2015-09-17 15:30:15 +02:00
return $ret
2015-05-17 22:43:53 +02:00
}
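
# Worked example for the offsets parsed above (illustrative only, the byte
# values are made up). A reply beginning with the bytes
#      86 52 04 00 01 00 02 06 1e 00 06 00 10 ...
# maps onto the ASCII-hex string as follows:
#      char 0:       "8"     init byte (high bit of the 2-byte record length)
#      chars 1-3:    "652"   record length 0x0652
#      chars 4-5:    "04"    handshake type: server hello
#      chars 6-7:    "00"    session id hit: no
#      chars 8-9:    "01"    certificate type: X.509
#      chars 10-13:  "0002"  version: SSLv2
#      chars 14-17:  "061e"  certificate length (1566 bytes)
#      chars 18-21:  "0006"  cipher spec length (6 bytes = 2 SSLv2 ciphers)
#      chars 22-25:  "0010"  connection id length
#      char 26:      the certificate starts here; the cipher specs follow it,
#                    3 bytes (6 hex chars) per cipher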
# Return:
# 0 if arg1 contains the entire server response.
# 1 if arg1 does not contain the entire server response.
# 2 if the response is malformed.
# 3 if (a) the response version is TLSv1.3;
# (b) arg1 contains the entire ServerHello (and appears to contain the entire response); and
# (c) the entire response is supposed to be parsed
# arg1: ASCII-HEX encoded reply
# arg2: whether to process the full request ("all") or just the basic request plus the ephemeral key if any ("ephemeralkey").
check_tls_serverhellodone( ) {
local tls_hello_ascii = " $1 "
2017-10-03 22:10:09 +02:00
local process_full = " $2 "
Add option to retrieve entire server response
In some cases the server's response to a ClientHello spans more than one packet. If the goal is just to determine whether the connection was successful and to extract a few pieces of information from the ServerHello message, then this is unlikely to be a problem. However, if there is a desire to extract the server's certificate chain (Certificate message) or to determine the type and size of the server's ephemeral public key (ServerKeyExchange message), then the entire response needs to be obtained, even if it spans multiple packets.
This PR adds a new function, `check_tls_serverhellodone()`, that checks whether the entire response has been received (e.g., whether the ServerHelloDone message has been received). If the response indicates that the response is incomplete, then `tls_sockets()` requests more data from the server until the response is complete or until the server doesn't provide any more data in response.
The PR only changes the behavior of `tls_sockets()` if the caller indicates that it wants to extract the ephemeral key or that it wants the entire response to be parsed. Otherwise, only the first packet returned by the server is sent to `parse_tls_serverhello()`. [The value of `$process_full` is not used at the moment, but will be in a subsequent PR that modifies `parse_tls_serverhello()`.]
This PR also changes `tls_sockets()` to send a close_notify to the server if the connection was successfully established.
2016-10-25 17:04:23 +02:00
local tls_handshake_ascii = "" tls_alert_ascii = ""
local -i i tls_hello_ascii_len tls_handshake_ascii_len tls_alert_ascii_len
2017-10-03 22:10:09 +02:00
local -i msg_len remaining tls_serverhello_ascii_len
Add option to retrieve entire server response
In some cases the server's response to a ClientHello spans more than one packet. If the goal is just to determine whether the connection was successful and to extract a few pieces of information from the ServerHello message, then this is unlikely to be a problem. However, if there is a desire to extract the server's certificate chain (Certificate message) or to determine the type and size of the server's ephemeral public key (ServerKeyExchange message), then the entire response needs to be obtained, even if it spans multiple packets.
This PR adds a new function, `check_tls_serverhellodone()`, that checks whether the entire response has been received (e.g., whether the ServerHelloDone message has been received). If the response indicates that the response is incomplete, then `tls_sockets()` requests more data from the server until the response is complete or until the server doesn't provide any more data in response.
The PR only changes the behavior of `tls_sockets()` if the caller indicates that it wants to extract the ephemeral key or that it wants the entire response to be parsed. Otherwise, only the first packet returned by the server is sent to `parse_tls_serverhello()`. [The value of `$process_full` is not used at the moment, but will be in a subsequent PR that modifies `parse_tls_serverhello()`.]
This PR also changes `tls_sockets()` to send a close_notify to the server if the connection was successfully established.
2016-10-25 17:04:23 +02:00
local tls_content_type tls_protocol tls_handshake_type tls_msg_type
local tls_err_level
2015-05-17 22:43:53 +02:00
Add option to retrieve entire server response
In some cases the server's response to a ClientHello spans more than one packet. If the goal is just to determine whether the connection was successful and to extract a few pieces of information from the ServerHello message, then this is unlikely to be a problem. However, if there is a desire to extract the server's certificate chain (Certificate message) or to determine the type and size of the server's ephemeral public key (ServerKeyExchange message), then the entire response needs to be obtained, even if it spans multiple packets.
This PR adds a new function, `check_tls_serverhellodone()`, that checks whether the entire response has been received (e.g., whether the ServerHelloDone message has been received). If the response indicates that the response is incomplete, then `tls_sockets()` requests more data from the server until the response is complete or until the server doesn't provide any more data in response.
The PR only changes the behavior of `tls_sockets()` if the caller indicates that it wants to extract the ephemeral key or that it wants the entire response to be parsed. Otherwise, only the first packet returned by the server is sent to `parse_tls_serverhello()`. [The value of `$process_full` is not used at the moment, but will be in a subsequent PR that modifies `parse_tls_serverhello()`.]
This PR also changes `tls_sockets()` to send a close_notify to the server if the connection was successfully established.
2016-10-25 17:04:23 +02:00
DETECTED_TLS_VERSION = ""
2015-05-17 22:43:53 +02:00
2016-10-25 17:04:23 +02:00
     if [[ -z "$tls_hello_ascii" ]]; then
          return 0              # no server hello received
     fi
     tls_hello_ascii_len=${#tls_hello_ascii}
     for (( i=0; i<tls_hello_ascii_len; i=i+msg_len )); do
          remaining=$tls_hello_ascii_len-$i
          [[ $remaining -lt 10 ]] && return 1
          tls_content_type="${tls_hello_ascii:i:2}"
          [[ "$tls_content_type" != "15" ]] && [[ "$tls_content_type" != "16" ]] && \
               [[ "$tls_content_type" != "17" ]] && return 2
          i=$i+2
          tls_protocol="${tls_hello_ascii:i:4}"
2017-10-03 22:10:09 +02:00
          [[ -z "$DETECTED_TLS_VERSION" ]] && DETECTED_TLS_VERSION="$tls_protocol"
2016-10-25 17:04:23 +02:00
[ [ " ${ tls_protocol : 0 : 2 } " != "03" ] ] && return 2
i = $i +4
msg_len = 2*$( hex2dec " ${ tls_hello_ascii : i : 4 } " )
i = $i +4
remaining = $tls_hello_ascii_len -$i
[ [ $msg_len -gt $remaining ] ] && return 1
if [ [ " $tls_content_type " = = "16" ] ] ; then
tls_handshake_ascii += " ${ tls_hello_ascii : i : msg_len } "
tls_handshake_ascii_len = ${# tls_handshake_ascii }
# the ServerHello MUST be the first handshake message
[ [ $tls_handshake_ascii_len -ge 2 ] ] && [ [ " ${ tls_handshake_ascii : 0 : 2 } " != "02" ] ] && return 2
if [ [ $tls_handshake_ascii_len -ge 12 ] ] ; then
DETECTED_TLS_VERSION = " ${ tls_handshake_ascii : 8 : 4 } "
2017-10-03 22:10:09 +02:00
                    # A version of {0x7F, xx} represents an implementation of a draft version of TLS 1.3
                    [[ "${DETECTED_TLS_VERSION:0:2}" == "7F" ]] && DETECTED_TLS_VERSION="0304"
                    if [[ 0x$DETECTED_TLS_VERSION -ge 0x0304 ]] && [[ "$process_full" == "ephemeralkey" ]]; then
                         tls_serverhello_ascii_len=2*$(hex2dec "${tls_handshake_ascii:2:6}")
                         if [[ $tls_handshake_ascii_len -ge $tls_serverhello_ascii_len+8 ]]; then
                              return 0       # The entire ServerHello message has been received (and the rest isn't needed)
2016-10-25 17:04:23 +02:00
                         fi
                    fi
               fi
          elif [[ "$tls_content_type" == "15" ]]; then   # TLS ALERT
               tls_alert_ascii+="${tls_hello_ascii:i:msg_len}"
          fi
     done
     # If there is a fatal alert, then we are done.
     tls_alert_ascii_len=${#tls_alert_ascii}
     for (( i=0; i<tls_alert_ascii_len; i=i+4 )); do
          remaining=$tls_alert_ascii_len-$i
          [[ $remaining -lt 4 ]] && return 1
          tls_err_level=${tls_alert_ascii:i:2}      # 1: warning, 2: fatal
          [[ $tls_err_level == "02" ]] && DETECTED_TLS_VERSION="" && return 0
     done
     # If there is a serverHelloDone or Finished, then we are done.
     tls_handshake_ascii_len=${#tls_handshake_ascii}
     for (( i=0; i<tls_handshake_ascii_len; i=i+msg_len )); do
          remaining=$tls_handshake_ascii_len-$i
          [[ $remaining -lt 8 ]] && return 1
          tls_msg_type="${tls_handshake_ascii:i:2}"
          i=$i+2
          msg_len=2*$(hex2dec "${tls_handshake_ascii:i:6}")
          i=$i+6
          remaining=$tls_handshake_ascii_len-$i
          [[ $msg_len -gt $remaining ]] && return 1
          # For SSLv3 - TLS1.2 look for a ServerHelloDone message.
          # For TLS 1.3 look for a Finished message.
          [[ $tls_msg_type == "0E" ]] && return 0
          [[ $tls_msg_type == "14" ]] && return 0
     done
2017-10-03 22:10:09 +02:00
     # If the response is TLSv1.3 and the full response is to be processed,
     # then return 3 if the entire ServerHello has been received.
     if [[ "$DETECTED_TLS_VERSION" == "0304" ]] && [[ "$process_full" == "all" ]] && \
        [[ $tls_handshake_ascii_len -gt 0 ]]; then
          return 3
     fi
2016-10-25 17:04:23 +02:00
     # If we haven't encountered a fatal alert or a ServerHelloDone,
     # then there must be more data to retrieve.
     return 1
}
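
# The loop below is a minimal, simplified sketch of how tls_sockets() can use the return value of
# check_tls_serverhellodone() to keep reading until the response is complete. It is illustrative
# only (not the actual implementation); sockread_serverhello() and $SOCK_REPLY_FILE are assumed to
# behave as elsewhere in this script, i.e. the next chunk of the reply ends up in $SOCK_REPLY_FILE
# (arguments omitted for brevity).
#
#    tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
#    check_tls_serverhellodone "$tls_hello_ascii"
#    hello_done=$?
#    while [[ $hello_done -eq 1 ]]; do                  # 1 == "response still incomplete"
#         sockread_serverhello                          # ask the server for more data
#         [[ -s "$SOCK_REPLY_FILE" ]] || break          # server didn't provide any more data
#         tls_hello_ascii+=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
#         check_tls_serverhellodone "$tls_hello_ascii"
#         hello_done=$?
#    done
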
# arg1: ASCII-HEX encoded reply
2016-11-03 15:18:27 +01:00
# arg2: (optional): "all" - process full response (including Certificate and certificate_status handshake messages)
# "ephemeralkey" - extract the server's ephemeral key (if any)
2017-07-11 21:10:40 +02:00
# arg3: (optional): CIPHER_SUITES string (lowercase, and in the format output by code2network())
# If present, parse_tls_serverhello() will check that the cipher in the ServerHello appears in
# the CIPHER_SUITES string.
2015-06-23 12:58:40 +02:00
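# A hypothetical call site, for orientation only (the variable names are made up; the hexdump
# format string matches the one used elsewhere in this script):
#
#    tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
#    parse_tls_serverhello "$tls_hello_ascii" "all" "$cipher_list_2send"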
parse_tls_serverhello() {
2016-10-25 17:04:23 +02:00
     local tls_hello_ascii="$1"
2016-11-03 15:18:27 +01:00
     local process_full="$2"
2017-07-11 21:10:40 +02:00
     local cipherlist="$3"
2016-05-16 22:52:51 +02:00
     local tls_handshake_ascii="" tls_alert_ascii=""
     local -i tls_hello_ascii_len tls_handshake_ascii_len tls_alert_ascii_len msg_len
2017-01-05 20:20:19 +01:00
     local tls_serverhello_ascii="" tls_certificate_ascii=""
2016-11-17 18:04:24 +01:00
     local tls_serverkeyexchange_ascii="" tls_certificate_status_ascii=""
2016-11-09 19:41:36 +01:00
     local -i tls_serverhello_ascii_len=0 tls_certificate_ascii_len=0
2016-11-17 18:04:24 +01:00
     local -i tls_serverkeyexchange_ascii_len=0 tls_certificate_status_ascii_len=0
2016-11-09 19:41:36 +01:00
     local tls_alert_descrip tls_sid_len_hex issuerDN subjectDN CAissuerDN CAsubjectDN
     local -i tls_sid_len offset extns_offset nr_certs=0
2016-05-16 22:52:51 +02:00
     local tls_msg_type tls_content_type tls_protocol tls_protocol2 tls_hello_time
2016-11-03 21:14:14 +01:00
     local tls_err_level tls_err_descr tls_cipher_suite rfc_cipher_suite tls_compression_method
2017-10-03 22:10:09 +02:00
     local tls_extensions="" extension_type named_curve_str="" named_curve_oid
2016-11-17 18:04:24 +01:00
     local -i i j extension_len tls_extensions_len ocsp_response_len ocsp_response_list_len
2017-07-11 21:10:40 +02:00
     local -i certificate_list_len certificate_len cipherlist_len
2016-11-03 21:14:14 +01:00
     local -i curve_type named_curve
     local -i dh_bits=0 msb mask
2016-11-17 18:04:24 +01:00
     local tmp_der_certfile tmp_pem_certfile hostcert_issuer="" ocsp_response=""
2017-10-03 22:10:09 +02:00
     local len1 len2 len3 key_bitstring="" tmp_der_key_file
     local dh_p dh_param ephemeral_param rfc7919_param
     local -i dh_p_len dh_param_len
2015-06-22 18:32:40 +02:00
2015-09-17 15:30:15 +02:00
     DETECTED_TLS_VERSION=""
2016-06-17 22:33:00 +02:00
     [[ -n "$tls_hello_ascii" ]] && echo "CONNECTED(00000003)" > $TMPFILE
2015-09-17 15:30:15 +02:00
2017-07-26 22:37:50 +02:00
     [[ "$DEBUG" -ge 5 ]] && echo $tls_hello_ascii      # one line without any blanks
2016-05-16 22:52:51 +02:00
     # The server's messages, including handshake messages, are carried by the record layer.
     # First, extract the handshake and alert messages.
     # see http://en.wikipedia.org/wiki/Transport_Layer_Security-SSL#TLS_record
2015-09-17 15:30:15 +02:00
     # byte 0:      content type: 0x14=CCS, 0x15=TLS alert, 0x16=Handshake, 0x17=Application data, 0x18=HB
     # byte 1+2:    TLS version word, major is 03, minor 00=SSL3, 01=TLS1, 02=TLS1.1, 03=TLS1.2
2016-05-16 22:52:51 +02:00
     # byte 3+4:    fragment length
     # bytes 5...:  message fragment
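     # Worked example (illustrative values): a record starting with "16 03 03 00 3d" is a handshake
     # record (content type 0x16), record-layer version TLS 1.2 (0x0303), carrying a 0x003d = 61 byte fragment.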
     tls_hello_ascii_len=${#tls_hello_ascii}
2017-07-26 22:37:50 +02:00
     if [[ $DEBUG -ge 3 ]] && [[ $tls_hello_ascii_len -gt 0 ]]; then
2016-11-03 15:18:27 +01:00
          echo "TLS message fragments:"
2015-09-17 15:30:15 +02:00
     fi
2016-05-16 22:52:51 +02:00
     for (( i=0; i<tls_hello_ascii_len; i=i+msg_len )); do
          if [[ $tls_hello_ascii_len-$i -lt 10 ]]; then
2016-11-03 15:18:27 +01:00
               if [[ "$process_full" == "all" ]]; then
                    # The entire server response should have been retrieved.
2017-02-25 16:31:30 +01:00
                    debugme tmln_warning "Malformed message."
2016-11-03 15:18:27 +01:00
                    return 1
               else
                    # This could just be a result of the server's response being
                    # split across two or more packets.
                    continue
               fi
2016-05-16 22:52:51 +02:00
          fi
          tls_content_type="${tls_hello_ascii:i:2}"
          i=$i+2
          tls_protocol="${tls_hello_ascii:i:4}"
          i=$i+4
          msg_len=2*$(hex2dec "${tls_hello_ascii:i:4}")
          i=$i+4
2015-09-17 15:30:15 +02:00
2017-05-15 19:47:13 +02:00
          if [[ $DEBUG -ge 3 ]]; then
               echo " protocol (rec. layer): 0x$tls_protocol"
2017-07-12 22:32:12 +02:00
               echo -n " tls_content_type: 0x$tls_content_type"
2016-05-16 22:52:51 +02:00
               case $tls_content_type in
2017-02-25 16:31:30 +01:00
                    15) tmln_out " (alert)" ;;
                    16) tmln_out " (handshake)" ;;
                    17) tmln_out " (application data)" ;;
                     *) tmln_out ;;
2016-05-16 22:52:51 +02:00
               esac
               echo " msg_len: $((msg_len/2))"
2017-02-25 16:31:30 +01:00
               tmln_out
2016-05-16 22:52:51 +02:00
          fi
2016-11-03 15:18:27 +01:00
if [ [ $tls_content_type != "15" ] ] && [ [ $tls_content_type != "16" ] ] && [ [ $tls_content_type != "17" ] ] ; then
2017-02-25 16:31:30 +01:00
debugme tmln_warning "Content type other than alert, handshake, or application data detected."
2016-05-16 22:52:51 +02:00
return 1
elif [ [ " ${ tls_protocol : 0 : 2 } " != "03" ] ] ; then
2017-02-25 16:31:30 +01:00
debugme tmln_warning "Protocol record_version.major is not 03."
2016-05-16 22:52:51 +02:00
return 1
fi
DETECTED_TLS_VERSION = $tls_protocol
2015-09-17 15:30:15 +02:00
2016-05-16 22:52:51 +02:00
          if [[ $msg_len -gt $tls_hello_ascii_len-$i ]]; then
2016-11-03 15:18:27 +01:00
               if [[ "$process_full" == "all" ]]; then
2017-02-25 16:31:30 +01:00
                    debugme tmln_warning "Malformed message."
2016-11-03 15:18:27 +01:00
                    return 1
               else
                    # This could just be a result of the server's response being
                    # split across two or more packets. Just grab the part that
                    # is available.
                    msg_len=$tls_hello_ascii_len-$i
               fi
2016-05-16 22:52:51 +02:00
          fi
2015-09-17 15:30:15 +02:00
2016-05-16 22:52:51 +02:00
if [ [ $tls_content_type = = "16" ] ] ; then
tls_handshake_ascii = " $tls_handshake_ascii ${ tls_hello_ascii : i : msg_len } "
elif [ [ $tls_content_type = = "15" ] ] ; then # TLS ALERT
tls_alert_ascii = " $tls_alert_ascii ${ tls_hello_ascii : i : msg_len } "
fi
done
2015-09-17 15:30:15 +02:00
2016-05-16 22:52:51 +02:00
     # Now check the alert messages.
     tls_alert_ascii_len=${#tls_alert_ascii}
2016-11-03 15:18:27 +01:00
     if [[ "$process_full" == "all" ]] && [[ $tls_alert_ascii_len%4 -ne 0 ]]; then
2017-02-25 16:31:30 +01:00
          debugme tmln_warning "Malformed message."
2016-11-03 15:18:27 +01:00
          return 1
     fi
2016-05-16 22:52:51 +02:00
     if [[ $tls_alert_ascii_len -gt 0 ]]; then
          debugme echo "TLS alert messages:"
          for (( i=0; i+3 < tls_alert_ascii_len; i=i+4 )); do
               tls_err_level=${tls_alert_ascii:i:2}    # 1: warning, 2: fatal
               j=$i+2
2017-07-26 22:37:50 +02:00
               tls_err_descr=${tls_alert_ascii:j:2}
2017-02-25 16:31:30 +01:00
               debugme tm_out " tls_err_descr: 0x${tls_err_descr} / = $(hex2dec ${tls_err_descr})"
2016-05-16 22:52:51 +02:00
               case $tls_err_descr in
                    00) tls_alert_descrip="close notify" ;;
                    0A) tls_alert_descrip="unexpected message" ;;
                    14) tls_alert_descrip="bad record mac" ;;
                    15) tls_alert_descrip="decryption failed" ;;
                    16) tls_alert_descrip="record overflow" ;;
                    1E) tls_alert_descrip="decompression failure" ;;
                    28) tls_alert_descrip="handshake failure" ;;
                    29) tls_alert_descrip="no certificate RESERVED" ;;
                    2A) tls_alert_descrip="bad certificate" ;;
                    2B) tls_alert_descrip="unsupported certificate" ;;
                    2C) tls_alert_descrip="certificate revoked" ;;
                    2D) tls_alert_descrip="certificate expired" ;;
                    2E) tls_alert_descrip="certificate unknown" ;;
                    2F) tls_alert_descrip="illegal parameter" ;;
                    30) tls_alert_descrip="unknown ca" ;;
                    31) tls_alert_descrip="access denied" ;;
                    32) tls_alert_descrip="decode error" ;;
                    33) tls_alert_descrip="decrypt error" ;;
                    3C) tls_alert_descrip="export restriction RESERVED" ;;
                    46) tls_alert_descrip="protocol version" ;;
                    47) tls_alert_descrip="insufficient security" ;;
                    50) tls_alert_descrip="internal error" ;;
                    56) tls_alert_descrip="inappropriate fallback" ;;
                    5A) tls_alert_descrip="user canceled" ;;
                    64) tls_alert_descrip="no renegotiation" ;;
2016-11-03 15:18:27 +01:00
                    6D) tls_alert_descrip="missing extension" ;;
2016-05-16 22:52:51 +02:00
                    6E) tls_alert_descrip="unsupported extension" ;;
                    6F) tls_alert_descrip="certificate unobtainable" ;;
                    70) tls_alert_descrip="unrecognized name" ;;
                    71) tls_alert_descrip="bad certificate status response" ;;
                    72) tls_alert_descrip="bad certificate hash value" ;;
                    73) tls_alert_descrip="unknown psk identity" ;;
2016-11-03 15:18:27 +01:00
                    74) tls_alert_descrip="certificate required" ;;
2016-05-16 22:52:51 +02:00
                    78) tls_alert_descrip="no application protocol" ;;
                     *) tls_alert_descrip="$(hex2dec "$tls_err_descr")" ;;
               esac
               if [[ $DEBUG -ge 2 ]]; then
2017-02-25 16:31:30 +01:00
                    tmln_out " ($tls_alert_descrip)"
                    tm_out " tls_err_level: ${tls_err_level}"
2016-05-16 22:52:51 +02:00
               fi
2017-07-26 22:37:50 +02:00
               case $tls_err_level in
                    01) echo -n "warning " >> $TMPFILE
                        debugme tmln_out " (warning)" ;;
                    02) echo -n "fatal " >> $TMPFILE
                        debugme tmln_out " (fatal)" ;;
               esac
               echo " alert $tls_alert_descrip" >> $TMPFILE
               echo "===============================================================================" >> $TMPFILE
2016-05-16 22:52:51 +02:00
               if [[ "$tls_err_level" != "01" ]] && [[ "$tls_err_level" != "02" ]]; then
2017-02-25 16:31:30 +01:00
                    debugme tmln_warning " Unexpected AlertLevel (0x$tls_err_level)."
2016-05-16 22:52:51 +02:00
                    return 1
               elif [[ "$tls_err_level" == "02" ]]; then
                    # Fatal alert
2016-11-03 15:18:27 +01:00
                    tmpfile_handle $FUNCNAME.txt
2016-05-16 22:52:51 +02:00
                    return 1
               fi
          done
     fi
2016-11-17 18:04:24 +01:00
     # Now extract just the server hello, certificate, certificate status,
2016-11-09 19:41:36 +01:00
     # and server key exchange handshake messages.
2016-05-16 22:52:51 +02:00
     tls_handshake_ascii_len=${#tls_handshake_ascii}
2017-05-15 19:47:13 +02:00
     if [[ $DEBUG -ge 3 ]] && [[ $tls_handshake_ascii_len -gt 0 ]]; then
2016-11-03 15:18:27 +01:00
          echo "TLS handshake messages:"
2016-05-16 22:52:51 +02:00
     fi
     for (( i=0; i<tls_handshake_ascii_len; i=i+msg_len )); do
          if [[ $tls_handshake_ascii_len-$i -lt 8 ]]; then
2016-11-03 15:18:27 +01:00
               if [[ "$process_full" == "all" ]]; then
                    # The entire server response should have been retrieved.
2017-02-25 16:31:30 +01:00
                    debugme tmln_warning "Malformed message."
2016-11-03 15:18:27 +01:00
                    return 1
               else
                    # This could just be a result of the server's response being
                    # split across two or more packets.
                    continue
               fi
2016-05-16 22:52:51 +02:00
          fi
          tls_msg_type="${tls_handshake_ascii:i:2}"
          i=$i+2
          msg_len=2*$(hex2dec "${tls_handshake_ascii:i:6}")
          i=$i+6
2017-05-15 19:47:13 +02:00
          if [[ $DEBUG -ge 3 ]]; then
2017-02-25 16:31:30 +01:00
               tm_out " handshake type: 0x${tls_msg_type}"
2016-05-16 22:52:51 +02:00
               case $tls_msg_type in
2017-02-25 16:31:30 +01:00
                    00) tmln_out " (hello_request)" ;;
                    01) tmln_out " (client_hello)" ;;
                    02) tmln_out " (server_hello)" ;;
                    03) tmln_out " (hello_verify_request)" ;;
2017-10-03 22:10:09 +02:00
                    04) tmln_out " (new_session_ticket)" ;;
                    05) tmln_out " (end_of_early_data)" ;;
2017-02-25 16:31:30 +01:00
                    06) tmln_out " (hello_retry_request)" ;;
                    08) tmln_out " (encrypted_extensions)" ;;
                    0B) tmln_out " (certificate)" ;;
                    0C) tmln_out " (server_key_exchange)" ;;
                    0D) tmln_out " (certificate_request)" ;;
                    0E) tmln_out " (server_hello_done)" ;;
                    0F) tmln_out " (certificate_verify)" ;;
                    10) tmln_out " (client_key_exchange)" ;;
                    14) tmln_out " (finished)" ;;
                    15) tmln_out " (certificate_url)" ;;
                    16) tmln_out " (certificate_status)" ;;
                    17) tmln_out " (supplemental_data)" ;;
                    18) tmln_out " (key_update)" ;;
2017-10-03 22:10:09 +02:00
                    FE) tmln_out " (message_hash)" ;;
2017-02-25 16:31:30 +01:00
                     *) tmln_out ;;
2016-05-16 22:52:51 +02:00
               esac
               echo " msg_len: $((msg_len/2))"
2017-02-25 16:31:30 +01:00
               tmln_out
2015-09-17 15:30:15 +02:00
          fi
2016-05-16 22:52:51 +02:00
          if [[ $msg_len -gt $tls_handshake_ascii_len-$i ]]; then
2016-11-03 15:18:27 +01:00
               if [[ "$process_full" == "all" ]]; then
2017-02-25 16:31:30 +01:00
                    debugme tmln_warning "Malformed message."
2016-11-03 15:18:27 +01:00
                    return 1
               else
                    # This could just be a result of the server's response being
                    # split across two or more packets. Just grab the part that
                    # is available.
                    msg_len=$tls_handshake_ascii_len-$i
               fi
2015-09-17 15:30:15 +02:00
          fi
2016-05-16 22:52:51 +02:00
if [ [ " $tls_msg_type " = = "02" ] ] ; then
if [ [ -n " $tls_serverhello_ascii " ] ] ; then
2017-02-25 16:31:30 +01:00
debugme tmln_warning "Response contained more than one ServerHello handshake message."
2016-05-16 22:52:51 +02:00
return 1
fi
tls_serverhello_ascii = " ${ tls_handshake_ascii : i : msg_len } "
tls_serverhello_ascii_len = $msg_len
2016-11-09 19:41:36 +01:00
elif [ [ " $process_full " = = "all" ] ] && [ [ " $tls_msg_type " = = "0B" ] ] ; then
if [ [ -n " $tls_certificate_ascii " ] ] ; then
2017-02-25 16:31:30 +01:00
debugme tmln_warning "Response contained more than one Certificate handshake message."
2016-11-09 19:41:36 +01:00
return 1
fi
tls_certificate_ascii = " ${ tls_handshake_ascii : i : msg_len } "
tls_certificate_ascii_len = $msg_len
2016-11-03 21:14:14 +01:00
elif ( [ [ " $process_full " = = "all" ] ] || [ [ " $process_full " = = "ephemeralkey" ] ] ) && [ [ " $tls_msg_type " = = "0C" ] ] ; then
if [ [ -n " $tls_serverkeyexchange_ascii " ] ] ; then
2017-02-25 16:31:30 +01:00
debugme tmln_warning "Response contained more than one ServerKeyExchange handshake message."
2016-11-03 21:14:14 +01:00
return 1
fi
tls_serverkeyexchange_ascii = " ${ tls_handshake_ascii : i : msg_len } "
tls_serverkeyexchange_ascii_len = $msg_len
2016-11-17 18:04:24 +01:00
elif [ [ " $process_full " = = "all" ] ] && [ [ " $tls_msg_type " = = "16" ] ] ; then
if [ [ -n " $tls_certificate_status_ascii " ] ] ; then
2017-02-25 16:31:30 +01:00
debugme tmln_warning "Response contained more than one certificate_status handshake message."
2016-11-17 18:04:24 +01:00
return 1
fi
tls_certificate_status_ascii = " ${ tls_handshake_ascii : i : msg_len } "
tls_certificate_status_ascii_len = $msg_len
2015-09-17 15:30:15 +02:00
fi
2016-05-16 22:52:51 +02:00
done
     if [[ $tls_serverhello_ascii_len -eq 0 ]]; then
          debugme echo "server hello empty, TCP connection closed"
2017-09-01 16:13:32 +02:00
          DETECTED_TLS_VERSION="closed TCP connection "
2016-11-03 15:18:27 +01:00
          tmpfile_handle $FUNCNAME.txt
2016-05-16 22:52:51 +02:00
          return 1              # no server hello received
     elif [[ $tls_serverhello_ascii_len -lt 76 ]]; then
2017-09-01 16:13:32 +02:00
          DETECTED_TLS_VERSION="reply malformed"
2016-05-16 22:52:51 +02:00
          debugme echo "Malformed response"
          return 1
     elif [[ "${tls_handshake_ascii:0:2}" != "02" ]]; then
          # the ServerHello MUST be the first handshake message
2017-09-01 16:13:32 +02:00
          DETECTED_TLS_VERSION="reply contained no ServerHello"
2017-02-25 16:31:30 +01:00
          debugme tmln_warning "The first handshake protocol message is not a ServerHello."
2016-05-16 22:52:51 +02:00
          return 1
2015-09-17 15:30:15 +02:00
     fi
2016-11-09 19:41:36 +01:00
     # First parse the server hello handshake message
2016-05-16 22:52:51 +02:00
     # byte 0+1:    03, TLS version word see byte 1+2
     # byte 2-5:    TLS timestamp for OpenSSL <1.01f
     # byte 6-33:   random, 28 bytes
     # byte 34:     session id length
     # byte 35+36+sid-len:  cipher suite!
     # byte 37+sid-len:     compression method: 00: none, 01: deflate, 64: LZS
     # byte 38+39+sid-len:  extension length
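     # Worked example (illustrative): with a 32-byte session id, tls_sid_len below is 64 hex chars,
     # so the cipher suite sits at hex offset 70+64=134 and the extensions length at 76+64=140.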
     tls_protocol2="${tls_serverhello_ascii:0:4}"
2017-10-03 22:10:09 +02:00
     [[ "${tls_protocol2:0:2}" == "7F" ]] && tls_protocol2="0304"
2016-05-16 22:52:51 +02:00
     if [[ "${tls_protocol2:0:2}" != "03" ]]; then
2017-02-25 16:31:30 +01:00
          debugme tmln_warning "server_version.major in ServerHello is not 03."
2016-05-16 22:52:51 +02:00
          return 1
2015-09-17 15:30:15 +02:00
     fi
2016-05-16 22:52:51 +02:00
     DETECTED_TLS_VERSION="$tls_protocol2"
2015-09-17 15:30:15 +02:00
2016-11-03 15:18:27 +01:00
     if [[ "0x${tls_protocol2:2:2}" -le "0x03" ]]; then
          tls_hello_time="${tls_serverhello_ascii:4:8}"
2017-10-31 11:27:19 +01:00
          [[ "$TLS_DIFFTIME_SET" || "$DEBUG" ]] && TLS_TIME=$(hex2dec "$tls_hello_time")
2016-11-03 15:18:27 +01:00
          tls_sid_len_hex="${tls_serverhello_ascii:68:2}"
          tls_sid_len=2*$(hex2dec "$tls_sid_len_hex")
          let offset=70+$tls_sid_len
          if [[ $tls_serverhello_ascii_len -lt 76+$tls_sid_len ]]; then
               debugme echo "Malformed response"
               return 1
          fi
     else
          let offset=68
     fi
2016-05-16 22:52:51 +02:00
2016-11-03 15:18:27 +01:00
     tls_cipher_suite="${tls_serverhello_ascii:offset:4}"
2016-05-16 22:52:51 +02:00
2016-11-03 15:18:27 +01:00
     if [[ "0x${tls_protocol2:2:2}" -le "0x03" ]]; then
          let offset=74+$tls_sid_len
          tls_compression_method="${tls_serverhello_ascii:offset:2}"
          let extns_offset=76+$tls_sid_len
     else
          let extns_offset=72
2016-05-16 22:52:51 +02:00
     fi
2016-11-03 15:18:27 +01:00
     if [[ $tls_serverhello_ascii_len -gt $extns_offset ]] && \
        ( [[ "$process_full" == "all" ]] || ( [[ "$process_full" == "ephemeralkey" ]] && [[ "0x${tls_protocol2:2:2}" -gt "0x03" ]] ) ); then
          if [[ $tls_serverhello_ascii_len -lt $extns_offset+4 ]]; then
               debugme echo "Malformed response"
               return 1
          fi
          tls_extensions_len=$(hex2dec "${tls_serverhello_ascii:extns_offset:4}")*2
          if [[ $tls_extensions_len -ne $tls_serverhello_ascii_len-$extns_offset-4 ]]; then
2017-02-25 16:31:30 +01:00
               debugme tmln_warning "Malformed message."
2016-11-03 15:18:27 +01:00
               return 1
          fi
          for (( i=0; i<tls_extensions_len; i=i+8+extension_len )); do
               if [[ $tls_extensions_len-$i -lt 8 ]]; then
                    debugme echo "Malformed response"
                    return 1
               fi
               let offset=$extns_offset+4+$i
               extension_type="${tls_serverhello_ascii:offset:4}"
               let offset=$extns_offset+8+$i
               extension_len=2*$(hex2dec "${tls_serverhello_ascii:offset:4}")
               if [[ $extension_len -gt $tls_extensions_len-$i-8 ]]; then
                    debugme echo "Malformed response"
                    return 1
               fi
               case $extension_type in
2017-03-22 20:18:38 +01:00
                    0000) tls_extensions+=" TLS server extension \"server name\" (id=0), len=$extension_len\n" ;;
                    0001) tls_extensions+=" TLS server extension \"max fragment length\" (id=1), len=$extension_len\n" ;;
                    0002) tls_extensions+=" TLS server extension \"client certificate URL\" (id=2), len=$extension_len\n" ;;
                    0003) tls_extensions+=" TLS server extension \"trusted CA keys\" (id=3), len=$extension_len\n" ;;
                    0004) tls_extensions+=" TLS server extension \"truncated HMAC\" (id=4), len=$extension_len\n" ;;
                    0005) tls_extensions+=" TLS server extension \"status request\" (id=5), len=$extension_len\n" ;;
                    0006) tls_extensions+=" TLS server extension \"user mapping\" (id=6), len=$extension_len\n" ;;
                    0007) tls_extensions+=" TLS server extension \"client authz\" (id=7), len=$extension_len\n" ;;
                    0008) tls_extensions+=" TLS server extension \"server authz\" (id=8), len=$extension_len\n" ;;
                    0009) tls_extensions+=" TLS server extension \"cert type\" (id=9), len=$extension_len\n" ;;
                    000A) tls_extensions+=" TLS server extension \"supported_groups\" (id=10), len=$extension_len\n" ;;
                    000B) tls_extensions+=" TLS server extension \"EC point formats\" (id=11), len=$extension_len\n" ;;
                    000C) tls_extensions+=" TLS server extension \"SRP\" (id=12), len=$extension_len\n" ;;
                    000D) tls_extensions+=" TLS server extension \"signature algorithms\" (id=13), len=$extension_len\n" ;;
                    000E) tls_extensions+=" TLS server extension \"use SRTP\" (id=14), len=$extension_len\n" ;;
                    000F) tls_extensions+=" TLS server extension \"heartbeat\" (id=15), len=$extension_len\n" ;;
                    0010) tls_extensions+=" TLS server extension \"application layer protocol negotiation\" (id=16), len=$extension_len\n"
2016-11-18 18:09:07 +01:00
                          if [[ $extension_len -lt 4 ]]; then
                               debugme echo "Malformed application layer protocol negotiation extension."
                               return 1
                          fi
                          echo -n "ALPN protocol: " >> $TMPFILE
                          let offset=$extns_offset+12+$i
                          j=2*$(hex2dec "${tls_serverhello_ascii:offset:4}")
                          if [[ $extension_len -ne $j+4 ]] || [[ $j -lt 2 ]]; then
                               debugme echo "Malformed application layer protocol negotiation extension."
                               return 1
                          fi
                          let offset=$offset+4
                          j=2*$(hex2dec "${tls_serverhello_ascii:offset:2}")
                          if [[ $extension_len -ne $j+6 ]]; then
                               debugme echo "Malformed application layer protocol negotiation extension."
                               return 1
                          fi
                          let offset=$offset+2
                          asciihex_to_binary_file "${tls_serverhello_ascii:offset:j}" "$TMPFILE"
                          echo "" >> $TMPFILE
                          echo "===============================================================================" >> $TMPFILE
                          ;;
2017-03-22 20:18:38 +01:00
                    0011) tls_extensions+=" TLS server extension \"certificate status version 2\" (id=17), len=$extension_len\n" ;;
                    0012) tls_extensions+=" TLS server extension \"signed certificate timestamps\" (id=18), len=$extension_len\n" ;;
                    0013) tls_extensions+=" TLS server extension \"client certificate type\" (id=19), len=$extension_len\n" ;;
                    0014) tls_extensions+=" TLS server extension \"server certificate type\" (id=20), len=$extension_len\n" ;;
                    0015) tls_extensions+=" TLS server extension \"TLS padding\" (id=21), len=$extension_len\n" ;;
                    0016) tls_extensions+=" TLS server extension \"encrypt-then-mac\" (id=22), len=$extension_len\n" ;;
                    0017) tls_extensions+=" TLS server extension \"extended master secret\" (id=23), len=$extension_len\n" ;;
                    0018) tls_extensions+=" TLS server extension \"token binding\" (id=24), len=$extension_len\n" ;;
                    0019) tls_extensions+=" TLS server extension \"cached info\" (id=25), len=$extension_len\n" ;;
                    0023) tls_extensions+=" TLS server extension \"session ticket\" (id=35), len=$extension_len\n" ;;
2017-10-03 22:10:09 +02:00
                    0028) tls_extensions+=" TLS server extension \"key share\" (id=40), len=$extension_len\n"
                          if [[ $extension_len -lt 4 ]]; then
                               debugme tmln_warning "Malformed key share extension."
                               return 1
                          fi
                          let offset=$extns_offset+12+$i
                          named_curve=$(hex2dec "${tls_serverhello_ascii:offset:4}")
                          let offset=$extns_offset+16+$i
                          msg_len=2*"$(hex2dec "${tls_serverhello_ascii:offset:4}")"
                          if [[ $msg_len -ne $extension_len-8 ]]; then
                               debugme tmln_warning "Malformed key share extension."
                               return 1
                          fi
                          case $named_curve in
2017-11-02 16:28:09 +01:00
                               21) dh_bits=224; named_curve_str="P-224"; named_curve_oid="06052b81040021" ;;
2017-10-03 22:10:09 +02:00
                               23) dh_bits=256; named_curve_str="P-256"; named_curve_oid="06082a8648ce3d030107" ;;
                               24) dh_bits=384; named_curve_str="P-384"; named_curve_oid="06052b81040022" ;;
                               25) dh_bits=521; named_curve_str="P-521"; named_curve_oid="06052b81040023" ;;
                               29) dh_bits=253; named_curve_str="X25519" ;;
                               30) dh_bits=448; named_curve_str="X448" ;;
                               256) dh_bits=2048; named_curve_str="ffdhe2048" ;;
                               257) dh_bits=3072; named_curve_str="ffdhe3072" ;;
                               258) dh_bits=4096; named_curve_str="ffdhe4096" ;;
                               259) dh_bits=6144; named_curve_str="ffdhe6144" ;;
                               260) dh_bits=8192; named_curve_str="ffdhe8192" ;;
                               *) named_curve_str=""; named_curve_oid="" ;;
                          esac
                          let offset=$extns_offset+20+$i
                          if [[ $named_curve -eq 29 ]]; then
                               key_bitstring="302a300506032b656e032100${tls_serverhello_ascii:offset:msg_len}"
                          elif [[ $named_curve -eq 30 ]]; then
                               key_bitstring="3042300506032b656f033900${tls_serverhello_ascii:offset:msg_len}"
                          elif [[ $named_curve -lt 256 ]] && [[ -n "$named_curve_oid" ]]; then
                               len1="$(printf "%02x" $(($msg_len/2+1)))"
                               [[ "0x${len1}" -ge "0x80" ]] && len1="81${len1}"
                               key_bitstring="03${len1}00${tls_serverhello_ascii:offset:msg_len}"
                               len2="$(printf "%02x" $((${#named_curve_oid}/2+9)))"
                               len3="$(printf "%02x" $((${#named_curve_oid}/2+${#key_bitstring}/2+11)))"
                               [[ "0x${len3}" -ge "0x80" ]] && len3="81${len3}"
                               key_bitstring="30${len3}30${len2}06072a8648ce3d0201${named_curve_oid}${key_bitstring}"
                          elif [[ "$named_curve_str" =~ "ffdhe" ]] && [[ "${TLS13_KEY_SHARES[named_curve]}" =~ "BEGIN" ]]; then
2017-10-27 19:07:04 +02:00
                               dh_param="$($OPENSSL pkey -pubout -outform DER 2>>$ERRFILE <<< "${TLS13_KEY_SHARES[named_curve]}" | hexdump -v -e '16/1 "%02X"')"
2017-10-03 22:10:09 +02:00
                               # First is the length of the public-key SEQUENCE, and it is always encoded in four bytes (3082xxxx)
                               # Next is the length of the parameters SEQUENCE, and it is also always encoded in four bytes (3082xxxx)
                               dh_param_len=8+2*"$(hex2dec "${dh_param:12:4}")"
                               dh_param="${dh_param:8:dh_param_len}"
                               if [[ "0x${tls_serverhello_ascii:offset:2}" -ge 0x80 ]]; then
                                    key_bitstring="00${tls_serverhello_ascii:offset:msg_len}"
                                    msg_len+=2
                               else
                                    key_bitstring="${tls_serverhello_ascii:offset:msg_len}"
                               fi
                               len1="$(printf "%04x" $(($msg_len/2)))"
                               key_bitstring="0282${len1}$key_bitstring"
                               len1="$(printf "%04x" $((${#key_bitstring}/2+1)))"
                               key_bitstring="${dh_param}0382${len1}00$key_bitstring"
                               len1="$(printf "%04x" $((${#key_bitstring}/2)))"
                               key_bitstring="3082${len1}$key_bitstring"
                          fi
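                          # Note (added for clarity): "302a300506032b656e032100" and "3042300506032b656f033900" above are
                          # the DER SubjectPublicKeyInfo headers for X25519 (OID 1.3.101.110) and X448 (OID 1.3.101.111),
                          # and "06072a8648ce3d0201" is the id-ecPublicKey OID, so that the raw key share can be handed
                          # to "$OPENSSL pkey -pubin -inform DER" below.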
if [ [ -n " $key_bitstring " ] ] ; then
tmp_der_key_file = $( mktemp $TEMPDIR /pub_key_der.XXXXXX) || return 1
asciihex_to_binary_file " $key_bitstring " " $tmp_der_key_file "
key_bitstring = " $( $OPENSSL pkey -pubin -in $tmp_der_key_file -inform DER 2>$ERRFILE ) "
rm $tmp_der_key_file
fi
; ;
2017-03-22 20:18:38 +01:00
                    0029) tls_extensions+=" TLS server extension \"pre-shared key\" (id=41), len=$extension_len\n" ;;
                    002A) tls_extensions+=" TLS server extension \"early data\" (id=42), len=$extension_len\n" ;;
                    002B) tls_extensions+=" TLS server extension \"supported versions\" (id=43), len=$extension_len\n" ;;
                    002C) tls_extensions+=" TLS server extension \"cookie\" (id=44), len=$extension_len\n" ;;
                    002D) tls_extensions+=" TLS server extension \"psk key exchange modes\" (id=45), len=$extension_len\n" ;;
                    002E) tls_extensions+=" TLS server extension \"ticket early data info\" (id=46), len=$extension_len\n" ;;
2017-10-03 22:10:09 +02:00
                    002F) tls_extensions+=" TLS server extension \"certificate authorities\" (id=47), len=$extension_len\n" ;;
                    0030) tls_extensions+=" TLS server extension \"oid filters\" (id=48), len=$extension_len\n" ;;
                    0031) tls_extensions+=" TLS server extension \"post handshake auth\" (id=49), len=$extension_len\n" ;;
2017-03-22 20:18:38 +01:00
                    3374) tls_extensions+=" TLS server extension \"next protocol\" (id=13172), len=$extension_len\n"
2016-11-18 18:09:07 +01:00
                          local -i protocol_len
                          echo -n "Protocols advertised by server: " >> $TMPFILE
                          let offset=$extns_offset+12+$i
                          for (( j=0; j<extension_len; j=j+protocol_len+2 )); do
                               if [[ $extension_len -lt $j+2 ]]; then
                                    debugme echo "Malformed next protocol extension."
                                    return 1
                               fi
                               protocol_len=2*$(hex2dec "${tls_serverhello_ascii:offset:2}")
                               if [[ $extension_len -lt $j+$protocol_len+2 ]]; then
                                    debugme echo "Malformed next protocol extension."
                                    return 1
                               fi
                               let offset=$offset+2
                               asciihex_to_binary_file "${tls_serverhello_ascii:offset:protocol_len}" "$TMPFILE"
                               let offset=$offset+$protocol_len
                               [[ $j+$protocol_len+2 -lt $extension_len ]] && echo -n ", " >> $TMPFILE
                          done
                          echo "" >> $TMPFILE
                          echo "===============================================================================" >> $TMPFILE
                          ;;
2017-03-22 20:18:38 +01:00
                    FF01) tls_extensions+=" TLS server extension \"renegotiation info\" (id=65281), len=$extension_len\n" ;;
                    *) tls_extensions+=" TLS server extension \"unrecognized extension\" (id=$(printf "%d\n\n" "0x$extension_type")), len=$extension_len\n" ;;
2016-11-03 15:18:27 +01:00
               esac
          done
     fi
2015-09-17 15:30:15 +02:00
2016-06-17 22:33:00 +02:00
if [ [ " $tls_protocol2 " = = "0300" ] ] ; then
echo "Protocol : SSLv3" >> $TMPFILE
else
echo " Protocol : TLSv1. $(( 0 x$tls_protocol2 - 0 x0301)) " >> $TMPFILE
fi
echo "===============================================================================" >> $TMPFILE
2016-11-15 21:55:54 +01:00
     if [[ $TLS_NR_CIPHERS -ne 0 ]]; then
          if [[ "${tls_cipher_suite:0:2}" == "00" ]]; then
               rfc_cipher_suite="$(show_rfc_style "x${tls_cipher_suite:2:2}")"
          else
               rfc_cipher_suite="$(show_rfc_style "x${tls_cipher_suite:0:4}")"
          fi
2016-06-17 22:33:00 +02:00
     else
2017-10-27 19:07:04 +02:00
          rfc_cipher_suite="$($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL' 2>>$ERRFILE | grep -i " 0x${tls_cipher_suite:0:2},0x${tls_cipher_suite:2:2}" | awk '{ print $3 }')"
2016-06-17 22:33:00 +02:00
     fi
2016-11-03 21:14:14 +01:00
     echo " Cipher : $rfc_cipher_suite" >> $TMPFILE
2017-10-03 22:10:09 +02:00
     if [[ $dh_bits -ne 0 ]]; then
          if [[ "$named_curve_str" =~ "ffdhe" ]]; then
               echo " Server Temp Key: DH, $named_curve_str, $dh_bits bits" >> $TMPFILE
          elif [[ "$named_curve_str" == "X25519" ]] || [[ "$named_curve_str" == "X448" ]]; then
               echo " Server Temp Key: $named_curve_str, $dh_bits bits" >> $TMPFILE
          else
               echo " Server Temp Key: ECDH, $named_curve_str, $dh_bits bits" >> $TMPFILE
          fi
     fi
     if [[ -n "$key_bitstring" ]]; then
          echo "$key_bitstring" >> $TMPFILE
          [[ "${TLS13_KEY_SHARES[named_curve]}" =~ "BEGIN" ]] && \
               echo "${TLS13_KEY_SHARES[named_curve]}" >> $TMPFILE
     fi
     echo "===============================================================================" >> $TMPFILE
2016-11-03 15:18:27 +01:00
if [ [ " 0x ${ tls_protocol2 : 2 : 2 } " -le "0x03" ] ] ; then
case $tls_compression_method in
00) echo "Compression: NONE" >> $TMPFILE ; ;
01) echo "Compression: zlib compression" >> $TMPFILE ; ;
40) echo "Compression: LZS compression" >> $TMPFILE ; ;
*) echo "Compression: unrecognized compression method" >> $TMPFILE ; ;
esac
echo "===============================================================================" >> $TMPFILE
2016-06-17 22:33:00 +02:00
fi
2017-03-22 20:18:38 +01:00
[ [ -n " $tls_extensions " ] ] && echo -e " $tls_extensions " >> $TMPFILE
2016-06-17 22:33:00 +02:00
2017-05-15 19:47:13 +02:00
     if [[ $DEBUG -ge 3 ]]; then
2016-05-16 22:52:51 +02:00
          echo "TLS server hello message:"
2015-09-17 15:30:15 +02:00
          if [[ $DEBUG -ge 4 ]]; then
2016-05-16 22:52:51 +02:00
               echo " tls_protocol: 0x$tls_protocol2"
2016-11-03 15:18:27 +01:00
               [[ "0x${tls_protocol2:2:2}" -le "0x03" ]] && echo " tls_sid_len: 0x$tls_sid_len_hex / = $((tls_sid_len/2))"
          fi
          if [[ "0x${tls_protocol2:2:2}" -le "0x03" ]]; then
               echo -n " tls_hello_time: 0x$tls_hello_time "
2017-10-31 11:27:19 +01:00
               parse_date "$TLS_TIME" "+%Y-%m-%d %r" "%s"     # in debugging mode we don't mind the cycles and don't use TLS_DIFFTIME_SET
2015-09-17 15:30:15 +02:00
          fi
2017-10-03 22:10:09 +02:00
          echo -n " tls_cipher_suite: 0x$tls_cipher_suite"
          if [[ -n "$rfc_cipher_suite" ]]; then
               echo " ($rfc_cipher_suite)"
          else
               echo ""
          fi
          if [[ $dh_bits -ne 0 ]]; then
               if [[ "$named_curve_str" =~ "ffdhe" ]]; then
                    echo " dh_bits: DH, $named_curve_str, $dh_bits bits"
               elif [[ "$named_curve_str" == "X25519" ]] || [[ "$named_curve_str" == "X448" ]]; then
                    echo " dh_bits: $named_curve_str, $dh_bits bits"
               else
                    echo " dh_bits: ECDH, $named_curve_str, $dh_bits bits"
               fi
          fi
2016-11-03 15:18:27 +01:00
          if [[ "0x${tls_protocol2:2:2}" -le "0x03" ]]; then
               echo -n " tls_compression_method: 0x$tls_compression_method "
               case $tls_compression_method in
                    00) echo "(NONE)" ;;
                    01) echo "(zlib compression)" ;;
                    40) echo "(LZS compression)" ;;
                     *) echo "(unrecognized compression method)" ;;
               esac
          fi
Reorganize run_server_defaults()
This PR reorganizes `run_server_defaults()` based on the suggestion in #515.
The current `determine_tls_extensions()` is renamed to `get_server_certificate()`, and two changes are made to it:
* it no longer includes an extra call to `$OPENSSL s_client` to check for the ALPN extension; and
* rather than setting `$TLS_EXTENSIONS` to be the extensions found during this call to the function, it adds any newly found extensions to those already in `$TLS_EXTENSIONS`.
The PR then adds a new function, `determine_tls_extensions()`, which borrows some logic from the old `determine_tls_extensions()`, but this new `determine_tls_extensions()` only looks for additional TLS extensions, including ALPN.
`run_server_defaults()` makes multiple calls to `get_server_certificate()` (as it previously did to `determine_tls_extensions()`) in order to collect all of the server's certificates, and then it makes one call to `determine_tls_extensions()`, which checks for support of extensions that were not checked for by `get_server_certificate()` (e.g., ALPN, extended master secret, signed certificate timestamps). A simplified sketch of the resulting flow follows below.
The new `determine_tls_extensions()` will check for most of the extensions that are checked for by `run_server_defaults()`, including the heartbeat extension, so the call to `determine_tls_extensions()` from `run_heartbleed()` will still work.
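# Simplified sketch of the flow described above (the function names come from the description;
# the protocol loop and arguments are illustrative only, not the actual implementation):
#
#    run_server_defaults() {
#         local proto
#         for proto in "" "-ssl3" "-tls1" "-tls1_1" "-tls1_2"; do
#              get_server_certificate "$proto"    # collect certificates; merge newly seen extensions into $TLS_EXTENSIONS
#         done
#         determine_tls_extensions               # probe remaining extensions (ALPN, extended master secret, SCTs, ...)
#    }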
2016-11-21 20:26:19 +01:00
if [ [ -n " $tls_extensions " ] ] ; then
2017-03-22 20:18:38 +01:00
echo -n " tls_extensions: "
newline_to_spaces " $( grep -a 'TLS server extension ' $TMPFILE | \
sed -e 's/TLS server extension //g' -e 's/\" (id=/\/#/g' \
-e 's/,.*$/,/g' -e 's/),$/\"/g' \
-e 's/elliptic curves\/#10/supported_groups\/#10/g' ) "
echo ""
2016-11-21 20:26:19 +01:00
if [ [ " $tls_extensions " = ~ "application layer protocol negotiation" ] ] ; then
2016-11-18 18:09:07 +01:00
echo " ALPN protocol: $( grep "ALPN protocol:" " $TMPFILE " | sed 's/ALPN protocol: //' ) "
fi
2016-11-21 20:26:19 +01:00
if [ [ " $tls_extensions " = ~ "next protocol" ] ] ; then
2016-11-18 18:09:07 +01:00
echo " NPN protocols: $( grep "Protocols advertised by server:" " $TMPFILE " | sed 's/Protocols advertised by server: //' ) "
fi
2016-11-03 15:18:27 +01:00
fi
2017-02-25 16:31:30 +01:00
tmln_out
2015-09-17 15:30:15 +02:00
fi
2016-11-03 21:14:14 +01:00
2017-07-11 21:10:40 +02:00
     # If a CIPHER_SUITES string was provided, then check that $tls_cipher_suite is in the string.
2017-07-19 18:46:46 +02:00
     # this appeared in yassl + MySQL (https://github.com/drwetter/testssl.sh/pull/784) but adds robustness
2017-07-25 16:19:36 +02:00
     # to the implementation
2017-07-11 21:10:40 +02:00
     if [[ -n "$cipherlist" ]]; then
          tls_cipher_suite="$(tolower "$tls_cipher_suite")"
          tls_cipher_suite="${tls_cipher_suite:0:2}\\x${tls_cipher_suite:2:2}"
          cipherlist_len=${#cipherlist}
          for (( i=0; i < cipherlist_len; i=i+8 )); do
               [[ "${cipherlist:i:6}" == "$tls_cipher_suite" ]] && break
          done
          if [[ $i -ge $cipherlist_len ]]; then
2017-07-19 18:46:46 +02:00
               BAD_SERVER_HELLO_CIPHER=true
2017-07-11 21:10:40 +02:00
               debugme echo "The ServerHello specifies a cipher suite that wasn't included in the ClientHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
     fi
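     # Worked example (illustrative): each entry in $cipherlist takes 6 characters plus a 2-character
     # separator, so if the ClientHello offered "c0\x2b, c0\x2f" and the ServerHello chose C02F,
     # the loop above matches at i=8; with no match, i reaches $cipherlist_len and we bail out.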
2016-11-09 19:41:36 +01:00
     # Now parse the Certificate message.
     if [[ "$process_full" == "all" ]]; then
          [[ -e "$HOSTCERT" ]] && rm "$HOSTCERT"
          [[ -e "$TEMPDIR/intermediatecerts.pem" ]] && rm "$TEMPDIR/intermediatecerts.pem"
     fi
     if [[ $tls_certificate_ascii_len -ne 0 ]]; then
          # The first certificate is the server's certificate. If there are any
          # subsequent certificates, they are intermediate certificates.
          if [[ $tls_certificate_ascii_len -lt 12 ]]; then
               debugme echo "Malformed Certificate Handshake message in ServerHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          certificate_list_len=2*$(hex2dec "${tls_certificate_ascii:0:6}")
          if [[ $certificate_list_len -ne $tls_certificate_ascii_len-6 ]]; then
               debugme echo "Malformed Certificate Handshake message in ServerHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          # Place server's certificate in $HOSTCERT
          certificate_len=2*$(hex2dec "${tls_certificate_ascii:6:6}")
          if [[ $certificate_len -gt $tls_certificate_ascii_len-12 ]]; then
               debugme echo "Malformed Certificate Handshake message in ServerHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          tmp_der_certfile=$(mktemp $TEMPDIR/der_cert.XXXXXX) || return 1
          asciihex_to_binary_file "${tls_certificate_ascii:12:certificate_len}" "$tmp_der_certfile"
          $OPENSSL x509 -inform DER -in "$tmp_der_certfile" -outform PEM -out "$HOSTCERT" 2>$ERRFILE
          if [[ $? -ne 0 ]]; then
               debugme echo "Malformed certificate in Certificate Handshake message in ServerHello."
               rm "$tmp_der_certfile"
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          rm "$tmp_der_certfile"
          get_pub_key_size
          echo "===============================================================================" >> $TMPFILE
          echo "---" >> $TMPFILE
          echo "Certificate chain" >> $TMPFILE
2017-09-22 20:06:51 +02:00
          subjectDN="$($OPENSSL x509 -in $HOSTCERT -noout -subject 2>>$ERRFILE)"
          issuerDN="$($OPENSSL x509 -in $HOSTCERT -noout -issuer 2>>$ERRFILE)"
2016-11-09 19:41:36 +01:00
          echo " $nr_certs s:${subjectDN:9}" >> $TMPFILE
          echo " i:${issuerDN:8}" >> $TMPFILE
          cat "$HOSTCERT" >> $TMPFILE
          echo "" > "$TEMPDIR/intermediatecerts.pem"
2016-11-17 18:04:24 +01:00
          # Place any additional certificates in $TEMPDIR/intermediatecerts.pem
2016-11-09 19:41:36 +01:00
          for (( i=12+certificate_len; i<tls_certificate_ascii_len; i=i+certificate_len )); do
               if [[ $tls_certificate_ascii_len-$i -lt 6 ]]; then
                    debugme echo "Malformed Certificate Handshake message in ServerHello."
                    tmpfile_handle $FUNCNAME.txt
                    return 1
               fi
               certificate_len=2*$(hex2dec "${tls_certificate_ascii:i:6}")
               i+=6
               if [[ $certificate_len -gt $tls_certificate_ascii_len-$i ]]; then
                    debugme echo "Malformed certificate in Certificate Handshake message in ServerHello."
                    tmpfile_handle $FUNCNAME.txt
                    return 1
               fi
               tmp_der_certfile=$(mktemp $TEMPDIR/der_cert.XXXXXX) || return 1
               asciihex_to_binary_file "${tls_certificate_ascii:i:certificate_len}" "$tmp_der_certfile"
               tmp_pem_certfile=$(mktemp $TEMPDIR/pem_cert.XXXXXX) || return 1
               $OPENSSL x509 -inform DER -in "$tmp_der_certfile" -outform PEM -out "$tmp_pem_certfile" 2>$ERRFILE
               if [[ $? -ne 0 ]]; then
                    debugme echo "Malformed certificate in Certificate Handshake message in ServerHello."
                    rm "$tmp_der_certfile" "$tmp_pem_certfile"
                    tmpfile_handle $FUNCNAME.txt
                    return 1
               fi
               nr_certs+=1
2017-09-22 20:06:51 +02:00
               CAsubjectDN="$($OPENSSL x509 -in $tmp_pem_certfile -noout -subject 2>>$ERRFILE)"
               CAissuerDN="$($OPENSSL x509 -in $tmp_pem_certfile -noout -issuer 2>>$ERRFILE)"
2016-11-09 19:41:36 +01:00
               echo " $nr_certs s:${CAsubjectDN:9}" >> $TMPFILE
               echo " i:${CAissuerDN:8}" >> $TMPFILE
               cat "$tmp_pem_certfile" >> $TMPFILE
               cat "$tmp_pem_certfile" >> "$TEMPDIR/intermediatecerts.pem"
2016-11-17 18:04:24 +01:00
               rm "$tmp_der_certfile"
               if [[ -n "$hostcert_issuer" ]] || [[ $tls_certificate_status_ascii_len -eq 0 ]]; then
                    rm "$tmp_pem_certfile"
               else
                    hostcert_issuer="$tmp_pem_certfile"
               fi
2016-11-09 19:41:36 +01:00
          done
          echo "---" >> $TMPFILE
          echo "Server certificate" >> $TMPFILE
          echo " subject=${subjectDN:9}" >> $TMPFILE
          echo " issuer=${issuerDN:8}" >> $TMPFILE
          echo "---" >> $TMPFILE
     fi
2016-11-17 18:04:24 +01:00
     # Now parse the certificate status message
     if [[ $tls_certificate_status_ascii_len -ne 0 ]] && [[ $tls_certificate_status_ascii_len -lt 8 ]]; then
          debugme echo "Malformed certificate status Handshake message in ServerHello."
          tmpfile_handle $FUNCNAME.txt
          return 1
     elif [[ $tls_certificate_status_ascii_len -ne 0 ]] && [[ "${tls_certificate_status_ascii:0:2}" == "01" ]]; then
          # This is a certificate status message of type "ocsp"
          ocsp_response_len=2*$(hex2dec "${tls_certificate_status_ascii:2:6}")
          if [[ $ocsp_response_len -ne $tls_certificate_status_ascii_len-8 ]]; then
               debugme echo "Malformed certificate status Handshake message in ServerHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          ocsp_response=$(mktemp $TEMPDIR/ocsp_response.XXXXXX) || return 1
          asciihex_to_binary_file "${tls_certificate_status_ascii:8:ocsp_response_len}" "$ocsp_response"
     elif [[ $tls_certificate_status_ascii_len -ne 0 ]] && [[ "${tls_certificate_status_ascii:0:2}" == "02" ]]; then
          # This is a list of OCSP responses, but only the first one is needed
          # since the first one corresponds to the server's certificate.
          ocsp_response_list_len=2*$(hex2dec "${tls_certificate_status_ascii:2:6}")
          if [[ $ocsp_response_list_len -ne $tls_certificate_status_ascii_len-8 ]] || [[ $ocsp_response_list_len -lt 6 ]]; then
               debugme echo "Malformed certificate status Handshake message in ServerHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          ocsp_response_len=2*$(hex2dec "${tls_certificate_status_ascii:8:6}")
          if [[ $ocsp_response_len -gt $ocsp_response_list_len-6 ]]; then
               debugme echo "Malformed certificate status Handshake message in ServerHello."
               tmpfile_handle $FUNCNAME.txt
               return 1
          fi
          ocsp_response=$(mktemp $TEMPDIR/ocsp_response.XXXXXX) || return 1
          asciihex_to_binary_file "${tls_certificate_status_ascii:14:ocsp_response_len}" "$ocsp_response"
     fi
     if [[ -n "$ocsp_response" ]]; then
          echo "OCSP response:" >> $TMPFILE
          echo "===============================================================================" >> $TMPFILE
          if [[ -n "$hostcert_issuer" ]]; then
               $OPENSSL ocsp -no_nonce -CAfile $TEMPDIR/intermediatecerts.pem -issuer $hostcert_issuer -cert $HOSTCERT -respin $ocsp_response -resp_text >> $TMPFILE 2>$ERRFILE
               rm "$hostcert_issuer"
          else
               $OPENSSL ocsp -respin $ocsp_response -resp_text >> $TMPFILE 2>$ERRFILE
          fi
          echo "===============================================================================" >> $TMPFILE
     elif [[ "$process_full" == "all" ]]; then
          echo "OCSP response: no response sent" >> $TMPFILE
          echo "===============================================================================" >> $TMPFILE
     fi
2016-11-03 21:14:14 +01:00
     # Now parse the server key exchange message
     if [[ $tls_serverkeyexchange_ascii_len -ne 0 ]]; then
2017-02-24 16:22:59 +01:00
          if [[ $rfc_cipher_suite =~ TLS_ECDHE_ ]] || [[ $rfc_cipher_suite =~ TLS_ECDH_anon ]] || \
2016-11-21 17:30:01 +01:00
             [[ $rfc_cipher_suite == ECDHE* ]] || [[ $rfc_cipher_suite == AECDH* ]]; then
2016-11-03 21:14:14 +01:00
               if [[ $tls_serverkeyexchange_ascii_len -lt 6 ]]; then
                    debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
                    tmpfile_handle $FUNCNAME.txt
                    return 1
               fi
               curve_type=$(hex2dec "${tls_serverkeyexchange_ascii:0:2}")
               if [[ $curve_type -eq 3 ]]; then
                    # named_curve - the curve is identified by a 2-byte number
                    named_curve=$(hex2dec "${tls_serverkeyexchange_ascii:2:4}")
                    # http://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8
                    case $named_curve in
                         1) dh_bits=163; named_curve_str="K-163" ;;
2016-12-08 16:19:57 +01:00
                         2) dh_bits=162; named_curve_str="sect163r1" ;;
2016-11-03 21:14:14 +01:00
                         3) dh_bits=163; named_curve_str="B-163" ;;
                         4) dh_bits=193; named_curve_str="sect193r1" ;;
                         5) dh_bits=193; named_curve_str="sect193r2" ;;
2016-12-08 16:19:57 +01:00
                         6) dh_bits=232; named_curve_str="K-233" ;;
2016-11-03 21:14:14 +01:00
                         7) dh_bits=233; named_curve_str="B-233" ;;
2016-12-08 16:19:57 +01:00
                         8) dh_bits=238; named_curve_str="sect239k1" ;;
                         9) dh_bits=281; named_curve_str="K-283" ;;
                         10) dh_bits=282; named_curve_str="B-283" ;;
                         11) dh_bits=407; named_curve_str="K-409" ;;
2016-11-03 21:14:14 +01:00
                         12) dh_bits=409; named_curve_str="B-409" ;;
2016-12-08 16:19:57 +01:00
                         13) dh_bits=570; named_curve_str="K-571" ;;
                         14) dh_bits=570; named_curve_str="B-571" ;;
                         15) dh_bits=161; named_curve_str="secp160k1" ;;
                         16) dh_bits=161; named_curve_str="secp160r1" ;;
                         17) dh_bits=161; named_curve_str="secp160r2" ;;
2016-11-03 21:14:14 +01:00
                         18) dh_bits=192; named_curve_str="secp192k1" ;;
                         19) dh_bits=192; named_curve_str="P-192" ;;
2016-12-08 16:19:57 +01:00
                         20) dh_bits=225; named_curve_str="secp224k1" ;;
2016-11-03 21:14:14 +01:00
                         21) dh_bits=224; named_curve_str="P-224" ;;
                         22) dh_bits=256; named_curve_str="secp256k1" ;;
                         23) dh_bits=256; named_curve_str="P-256" ;;
                         24) dh_bits=384; named_curve_str="P-384" ;;
                         25) dh_bits=521; named_curve_str="P-521" ;;
                         26) dh_bits=256; named_curve_str="brainpoolP256r1" ;;
                         27) dh_bits=384; named_curve_str="brainpoolP384r1" ;;
                         28) dh_bits=512; named_curve_str="brainpoolP512r1" ;;
2016-11-08 16:20:48 +01:00
                         29) dh_bits=253; named_curve_str="X25519" ;;
2016-11-03 21:14:14 +01:00
                         30) dh_bits=448; named_curve_str="X448" ;;
                    esac
               fi
2016-11-08 16:20:48 +01:00
               if [[ $dh_bits -ne 0 ]] && [[ $named_curve -ne 29 ]] && [[ $named_curve -ne 30 ]]; then
2017-08-01 20:49:06 +02:00
                    [[ $DEBUG -ge 3 ]] && echo -e " dh_bits: ECDH, $named_curve_str, $dh_bits bits\n"
2016-11-08 16:20:48 +01:00
                    echo " Server Temp Key: ECDH, $named_curve_str, $dh_bits bits" >> $TMPFILE
               elif [[ $dh_bits -ne 0 ]]; then
2017-08-01 20:49:06 +02:00
                    [[ $DEBUG -ge 3 ]] && echo -e " dh_bits: $named_curve_str, $dh_bits bits\n"
2016-11-08 16:20:48 +01:00
                    echo " Server Temp Key: $named_curve_str, $dh_bits bits" >> $TMPFILE
               fi
2017-02-24 16:22:59 +01:00
elif [ [ $rfc_cipher_suite = ~ TLS_DHE_ ] ] || [ [ $rfc_cipher_suite = ~ TLS_DH_anon ] ] || \
2016-11-21 17:30:01 +01:00
[ [ $rfc_cipher_suite = = "DHE-" * ] ] || [ [ $rfc_cipher_suite = = "EDH-" * ] ] || \
[ [ $rfc_cipher_suite = = "EXP1024-DHE-" * ] ] ; then
2016-11-03 21:14:14 +01:00
# For DH ephemeral keys the first field is p, and the length of
# p is the same as the length of the public key.
if [ [ $tls_serverkeyexchange_ascii_len -lt 4 ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
tmpfile_handle $FUNCNAME .txt
return 1
fi
2016-11-18 16:19:44 +01:00
dh_p_len = 2*$( hex2dec " ${ tls_serverkeyexchange_ascii : 0 : 4 } " )
offset = 4+$dh_p_len
2016-11-03 21:14:14 +01:00
if [ [ $tls_serverkeyexchange_ascii_len -lt $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
tmpfile_handle $FUNCNAME .txt
return 1
fi
2016-11-18 16:19:44 +01:00
# Subtract any leading 0 bytes
2016-11-03 21:14:14 +01:00
for ( ( i = 4; i < offset; i = i+2 ) ) ; do
[ [ " ${ tls_serverkeyexchange_ascii : i : 2 } " != "00" ] ] && break
2016-11-18 16:19:44 +01:00
dh_p_len = $dh_p_len -2
2016-11-03 21:14:14 +01:00
done
if [ [ $i -ge $offset ] ] ; then
debugme echo "Malformed ServerKeyExchange Handshake message in ServerHello."
tmpfile_handle $FUNCNAME .txt
return 1
fi
2016-11-18 16:19:44 +01:00
dh_p = " ${ tls_serverkeyexchange_ascii : i : dh_p_len } "
2016-11-03 21:14:14 +01:00
2016-11-18 16:19:44 +01:00
dh_bits = 4*$dh_p_len
2016-11-03 21:14:14 +01:00
msb = $( hex2dec " ${ tls_serverkeyexchange_ascii : i : 2 } " )
for ( ( mask = 128; msb < mask; mask/= 2 ) ) ; do
dh_bits = $dh_bits -1
done
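               # Worked example (illustrative): a 256-byte prime p is 512 hex chars here, i.e.
               # 4*512 = 2048 bits before the correction above; if the most significant byte of p
               # is e.g. 0x7f (msb = 127 < 128), the loop subtracts one bit, giving 2047.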
               key_bitstring="$(get_dh_ephemeralkey "$tls_serverkeyexchange_ascii")"
               [[ $? -eq 0 ]] && echo "$key_bitstring" >> $TMPFILE

               # Check to see whether the ephemeral public key uses one of the groups from
               # RFC 7919 for parameters
               case $dh_bits in
                    2048) named_curve=256; named_curve_str=" ffdhe2048," ;;
                    3072) named_curve=257; named_curve_str=" ffdhe3072," ;;
                    4096) named_curve=258; named_curve_str=" ffdhe4096," ;;
                    6144) named_curve=259; named_curve_str=" ffdhe6144," ;;
                    8192) named_curve=260; named_curve_str=" ffdhe8192," ;;
                    *) named_curve=0; named_curve_str="" ;;
               esac
               [[ -z "$key_bitstring" ]] && named_curve=0 && named_curve_str=""
               if [[ $named_curve -ne 0 ]] && [[ "${TLS13_KEY_SHARES[named_curve]}" =~ BEGIN ]]; then
                    ephemeral_param="$($OPENSSL pkey -pubin -text -noout 2>>$ERRFILE <<< "$key_bitstring" | grep -A 1000 "prime:")"
                    rfc7919_param="$($OPENSSL pkey -text -noout 2>>$ERRFILE <<< "${TLS13_KEY_SHARES[named_curve]}" | grep -A 1000 "prime:")"
                    [[ "$ephemeral_param" != "$rfc7919_param" ]] && named_curve_str=""
               fi

               [[ $DEBUG -ge 3 ]] && [[ $dh_bits -ne 0 ]] && echo -e "     dh_bits: DH,$named_curve_str $dh_bits bits\n"
               [[ $dh_bits -ne 0 ]] && echo "Server Temp Key: DH,$named_curve_str $dh_bits bits" >> $TMPFILE
          fi
     fi

     tmpfile_handle $FUNCNAME.txt
     return 0
}

#arg1: list of cipher suites or empty
#arg2: "true" if full server response should be parsed.
sslv2_sockets() {
     local ret
     local client_hello cipher_suites len_client_hello
     local len_ciph_suites_byte len_ciph_suites
     local server_hello sock_reply_file2
     local -i response_len server_hello_len
     local parse_complete=false

     if [[ "$2" == "true" ]]; then
          parse_complete=true
     fi
     if [[ -n "$1" ]]; then
          cipher_suites="$1"
     else
          cipher_suites="
          05,00,80, # 1st cipher   9 cipher specs, only classical V2 ciphers are used here, see FIXME below
          03,00,80, # 2nd          there are v3 in v2!!! : https://tools.ietf.org/html/rfc6101#appendix-E
          01,00,80, # 3rd          Cipher specifications introduced in version 3.0 can be included in version 2.0 client hello messages using
          07,00,c0, # 4th          the syntax below. [..] # V2CipherSpec (see Version 3.0 name) = { 0x00, CipherSuite }; !!!!
          08,00,80, # 5th
          06,00,40, # 6th
          04,00,80, # 7th
          02,00,80, # 8th
          00,00,00" # 9th
          # FIXME: http://max.euston.net/d/tip_sslciphers.html
     fi

     code2network "$cipher_suites"      # convert CIPHER_SUITES
     cipher_suites="$NW_STR"            # we don't have the leading \x here so string length is two byte less, see next
     len_ciph_suites_byte=${#cipher_suites}
     let "len_ciph_suites_byte += 2"
     len_ciph_suites=$(printf "%02x\n" $((len_ciph_suites_byte / 4 )))
     len_client_hello=$(printf "%02x\n" $((0x$len_ciph_suites + 0x19)))

     client_hello="
     ,80,$len_client_hello    # length
     ,01                      # Client Hello
     ,00,02                   # SSLv2
     ,00,$len_ciph_suites     # cipher spec length
     ,00,00                   # session ID length
     ,00,10                   # challenge length
     ,$cipher_suites
     ,29,22,be,b3,5a,01,8b,04,fe,5f,80,03,a0,13,eb,c4"    # Challenge
     # https://idea.popcount.org/2012-06-16-dissecting-ssl-handshake/ (client)
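     # Worked example for the default list above: 9 three-byte SSLv2 cipher specs = 27 = 0x1b bytes,
     # and the record length is 0x1b + 0x19 = 0x34 (1 byte msg type + 2 version + three 2-byte
     # length fields + 27 cipher spec bytes + 16 challenge bytes).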
     fd_socket 5 || return 6
     debugme echo -n "sending client hello... "
     socksend_sslv2_clienthello "$client_hello"

     sockread_serverhello 32768
     if "$parse_complete"; then
          server_hello=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
          server_hello_len=2+$(hex2dec "${server_hello:1:3}")
          response_len=$(wc -c "$SOCK_REPLY_FILE" | awk '{ print $1 }')
          for (( 1; response_len < server_hello_len; 1 )); do
               sock_reply_file2=$(mktemp $TEMPDIR/ddreply.XXXXXX) || return 7
               mv "$SOCK_REPLY_FILE" "$sock_reply_file2"

               debugme echo -n "requesting more server hello data... "
               socksend "" $USLEEP_SND
               sockread_serverhello 32768

               [[ ! -s "$SOCK_REPLY_FILE" ]] && break
               cat "$SOCK_REPLY_FILE" >> "$sock_reply_file2"
               mv "$sock_reply_file2" "$SOCK_REPLY_FILE"
               response_len=$(wc -c "$SOCK_REPLY_FILE" | awk '{ print $1 }')
          done
     fi

     debugme echo "reading server hello... "
     if [[ "$DEBUG" -ge 4 ]]; then
          hexdump -C "$SOCK_REPLY_FILE" | head -6
          tmln_out
     fi

     parse_sslv2_serverhello "$SOCK_REPLY_FILE" "$parse_complete"
     ret=$?

     close_socket
     tmpfile_handle $FUNCNAME.dd $SOCK_REPLY_FILE
     return $ret
}
# arg1: supported groups extension
# arg2: "all" - process full response (including Certificate and certificate_status handshake messages)
# "ephemeralkey" - extract the server's ephemeral key (if any)
# Given the supported groups extension, create a key_share extension that includes a key share for
# each group listed in the supported groups extension.
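# Illustrative sketch of the returned format (assuming TLS13_PUBLIC_KEY_SHARES[] holds complete
# key_share entries): a single P-256 share is a 69-byte entry (2 bytes group 00,17 + 2 bytes
# key_exchange length 00,41 + 65-byte uncompressed point), so the extension would come out as
# "00,28, 00,47, 00,45, <entry>", i.e. extension type, extension length, client_shares list length.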
generate_key_share_extension() {
     local supported_groups
     local -i i len supported_groups_len group
     local extn_len list_len
     local key_share key_shares=""

     supported_groups="${1//\\x/}"
     [[ "${supported_groups:0:4}" != "000a" ]] && return 1
     supported_groups_len=${#supported_groups}
     [[ $supported_groups_len -lt 16 ]] && return 1
     len=2*$(hex2dec "${supported_groups:4:4}")
     [[ $len+8 -ne $supported_groups_len ]] && return 1
     len=2*$(hex2dec "${supported_groups:8:4}")
     [[ $len+12 -ne $supported_groups_len ]] && return 1
     for (( i=12; i<supported_groups_len; i=i+4 )); do
          group=$(hex2dec "${supported_groups:i:4}")
          # If the supported groups extension lists more than one group,
          # then don't include the larger key shares in the extension.
          [[ $i -gt 12 ]] && [[ $group -gt 256 ]] && continue
          # Versions of OpenSSL prior to 1.1.0 cannot perform operations
          # with X25519 keys, so don't include the X25519 key share
          # if the server's response needs to be decrypted and an
          # older version of OpenSSL is being used.
          [[ $i -gt 12 ]] && [[ $group -eq 29 ]] && [[ "$2" == "all" ]] && \
               [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR != "1.1.0"* ]] && \
               [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR != "1.1.1"* ]] && \
               continue
          # NOTE: The public keys could be extracted from the private keys
          # (TLS13_KEY_SHARES) using $OPENSSL, but only OpenSSL 1.1.0 can
          # extract the public key from an X25519 private key.
          key_share="${TLS13_PUBLIC_KEY_SHARES[group]}"
          if [[ ${#key_share} -gt 4 ]]; then
               key_shares+=" ,$key_share"
          fi
     done
     [[ -z "$key_shares" ]] && tm_out "" && return 0

     len=${#key_shares}/3
     list_len="$(printf "%04x" "$len")"
     len+=2
     extn_len="$(printf "%04x" "$len")"
     tm_out "00,28, ${extn_len:0:2},${extn_len:2:2}, ${list_len:0:2},${list_len:2:2}$key_shares"
     return 0
}

# ARG1: TLS version low byte (00: SSLv3,  01: TLS 1.0,  02: TLS 1.1,  03: TLS 1.2)
# ARG2: CIPHER_SUITES string (lowercase, and in the format output by code2network())
# ARG3: "all" - process full response (including Certificate and certificate_status handshake messages)
#       "ephemeralkey" - extract the server's ephemeral key (if any)
# ARG4: (optional) additional request extensions
# ARG5: (optional) "true" if ClientHello should advertise compression methods other than "NULL"
socksend_tls_clienthello() {
     local tls_low_byte="$1" tls_legacy_version="$1"
     local process_full="$3"
     local tls_word_reclayer="03, 01"     # the first TLS version number is the record layer and always 0301 -- except: SSLv3
     local servername_hexstr len_servername len_servername_hex
     local hexdump_format_str part1 part2
     local all_extensions=""
     local -i i j len_extension len_padding_extension len_all
     local len_sni_listlen len_sni_ext len_extension_hex len_padding_extension_hex
     local cipher_suites len_ciph_suites len_ciph_suites_byte len_ciph_suites_word
     local len_client_hello_word len_all_word
     local ecc_cipher_suite_found=false
     local extension_signature_algorithms extension_heartbeat
     local extension_session_ticket extension_next_protocol extension_padding
     local extension_supported_groups="" extension_supported_point_formats=""
     local extensions_key_share="" extn_type supported_groups_c2n=""
     local extra_extensions extra_extensions_list="" extension_supported_versions=""
     local offer_compression=false compression_methods
     # TLSv1.3 ClientHello messages MUST specify only the NULL compression method.
     [[ "$5" == "true" ]] && [[ "0x$tls_low_byte" -le "0x03" ]] && offer_compression=true

     cipher_suites="$2"                      # we don't have the leading \x here so string length is two byte less, see next
     len_ciph_suites_byte=${#cipher_suites}
     let "len_ciph_suites_byte += 2"

     # we have additional 2 chars \x in each 2 byte string and 2 byte ciphers, so we need to divide by 4:
     len_ciph_suites=$(printf "%02x\n" $((len_ciph_suites_byte / 4 )))
     len2twobytes "$len_ciph_suites"
     len_ciph_suites_word="$LEN_STR"
     #[[ $DEBUG -ge 3 ]] && echo $len_ciph_suites_word

     if [[ "$tls_low_byte" != "00" ]]; then
          # Add extensions

          # Check to see if any ECC cipher suites are included in cipher_suites
          # (not needed for TLSv1.3)
          if [[ "0x$tls_low_byte" -le "0x03" ]]; then
               for (( i=0; i<len_ciph_suites_byte; i=i+8 )); do
                    j=$i+4
                    part1="0x${cipher_suites:$i:2}"
                    part2="0x${cipher_suites:$j:2}"
                    if [[ "$part1" == "0xc0" ]]; then
                         if [[ "$part2" -ge "0x01" ]] && [[ "$part2" -le "0x19" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0x23" ]] && [[ "$part2" -le "0x3b" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0x48" ]] && [[ "$part2" -le "0x4f" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0x5c" ]] && [[ "$part2" -le "0x63" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0x70" ]] && [[ "$part2" -le "0x79" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0x86" ]] && [[ "$part2" -le "0x8d" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0x9a" ]] && [[ "$part2" -le "0x9b" ]]; then
                              ecc_cipher_suite_found=true && break
                         elif [[ "$part2" -ge "0xac" ]] && [[ "$part2" -le "0xaf" ]]; then
                              ecc_cipher_suite_found=true && break
                         fi
                    elif [[ "$part1" == "0xcc" ]]; then
                         if [[ "$part2" == "0xa8" ]] || [[ "$part2" == "0xa9" ]] || \
                            [[ "$part2" == "0xac" ]] || [[ "$part2" == "0x13" ]] || \
                            [[ "$part2" == "0x14" ]]; then
                              ecc_cipher_suite_found=true && break
                         fi
                    fi
               done
          fi

          if [[ -n "$SNI" ]]; then
               #formatted example for SNI
               #00 00    # extension server_name
               #00 1a    # length                      = the following +2 = server_name length + 5
               #00 18    # server_name list_length     = server_name length +3
               #00       # server_name type (hostname)
               #00 15    # server_name length
               #66 66 66 66 66 66 2e 66 66 66 66 66 66 66 66 66 66 2e 66 66 66  target.mydomain1.tld # server_name target
               len_servername=${#NODE}
               hexdump_format_str="$len_servername/1 \"%02x,\""
               servername_hexstr=$(printf $NODE | hexdump -v -e "${hexdump_format_str}" | sed 's/,$//')
               # convert lengths we need to fill in from dec to hex:
               len_servername_hex=$(printf "%02x\n" $len_servername)
               len_sni_listlen=$(printf "%02x\n" $((len_servername+3)))
               len_sni_ext=$(printf "%02x\n" $((len_servername+5)))
          fi

          if [[ 0x$tls_low_byte -le 0x03 ]]; then
               extension_signature_algorithms="
               00, 0d,                    # Type: signature_algorithms , see RFC 5246
               00, 20, 00,1e,             # lengths
               06,01, 06,02, 06,03, 05,01, 05,02, 05,03, 04,01, 04,02, 04,03,
               03,01, 03,02, 03,03, 02,01, 02,02, 02,03"
          else
               extension_signature_algorithms="
               00, 0d,                    # Type: signature_algorithms , see draft-ietf-tls-tls13
               00, 1c, 00, 1a,            # lengths
               04,03, 05,03, 06,03, 08,04, 08,05, 08,06,
               04,01, 05,01, 06,01, 08,07, 08,08, 02,01, 02,03"
          fi
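          # For reference (IANA TLS SignatureScheme registry): 04,03 is ecdsa_secp256r1_sha256,
          # 08,04 is rsa_pss_rsae_sha256 and 04,01 is rsa_pkcs1_sha256; the TLS 1.2 list above
          # uses the older hash/signature pairs (e.g. 06,01 = SHA512 with RSA).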
          extension_heartbeat="
          00, 0f, 00, 01, 01"

          extension_session_ticket="
          00, 23, 00, 00"

          extension_next_protocol="
          33, 74, 00, 00"

          if "$ecc_cipher_suite_found"; then
               # Supported Groups Extension
               extension_supported_groups="
               00, 0a,                    # Type: Supported Elliptic Curves , see RFC 4492
               00, 3e, 00, 3c,            # lengths
               00, 0e, 00, 0d, 00, 19, 00, 1c, 00, 1e, 00, 0b, 00, 0c, 00, 1b,
               00, 18, 00, 09, 00, 0a, 00, 1a, 00, 16, 00, 17, 00, 1d, 00, 08,
               00, 06, 00, 07, 00, 14, 00, 15, 00, 04, 00, 05, 00, 12, 00, 13,
               00, 01, 00, 02, 00, 03, 00, 0f, 00, 10, 00, 11"
          elif [[ 0x$tls_low_byte -gt 0x03 ]]; then
               # Supported Groups Extension
               if [[ "$process_full" != "all" ]] || \
                  [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == "1.1.0"* ]] || \
                  [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == "1.1.1"* ]]; then
                    extension_supported_groups="
                    00,0a,                # Type: Supported Groups, see draft-ietf-tls-tls13
                    00,0e, 00,0c,         # lengths
                    00,1d, 00,17, 00,18, 00,19,
                    01,00, 01,01"
               else
                    # OpenSSL prior to 1.1.0 does not support X25519, so list it as the least
                    # preferred option if the response needs to be decrypted.
                    extension_supported_groups="
                    00,0a,                # Type: Supported Groups, see draft-ietf-tls-tls13
                    00,0e, 00,0c,         # lengths
                    00,17, 00,18, 00,19,
                    01,00, 01,01, 00,1d"
               fi
               code2network "$extension_supported_groups"
               supported_groups_c2n="$NW_STR"
          fi
          if "$ecc_cipher_suite_found" || [[ 0x$tls_low_byte -gt 0x03 ]]; then
               # Supported Point Formats Extension.
               extension_supported_point_formats="
               00, 0b,                    # Type: Supported Point Formats , see RFC 4492
               00, 02,                    # len
               01, 00"
          fi

          # Each extension should appear in the ClientHello at most once. So,
          # find out what extensions were provided as an argument and only use
          # the provided values for those extensions.
          extra_extensions="$(tolower "$4")"
          code2network "$extra_extensions"
          len_all=${#extra_extensions}
          for (( i=0; i < len_all; i=i+16+4*0x$len_extension_hex )); do
               part2=$i+4
               extn_type="${NW_STR:i:2}${NW_STR:part2:2}"
               extra_extensions_list+=" $extn_type "
               j=$i+8
               part2=$j+4
               len_extension_hex="${NW_STR:j:2}${NW_STR:part2:2}"
               if [[ "$extn_type" == "000a" ]] && [[ 0x$tls_low_byte -gt 0x03 ]]; then
                    j=14+4*0x$len_extension_hex
                    supported_groups_c2n="${NW_STR:i:j}"
               fi
          done
          if [[ 0x$tls_low_byte -gt 0x03 ]]; then
               extensions_key_share="$(generate_key_share_extension "$supported_groups_c2n" "$process_full")"
               [[ $? -ne 0 ]] && return 1
          fi

          if [[ -n "$SNI" ]] && [[ ! "$extra_extensions_list" =~ " 0000 " ]]; then
               all_extensions="
                00, 00                   # extension server_name
               ,00, $len_sni_ext         # length SNI EXT
               ,00, $len_sni_listlen     # server_name list_length
               ,00                       # server_name type (hostname)
               ,00, $len_servername_hex  # server_name length. We assume len(hostname) < FF - 9
               ,$servername_hexstr"      # server_name target
          fi

          if [[ 0x$tls_low_byte -ge 0x04 ]] && [[ ! "$extra_extensions_list" =~ " 002b " ]]; then
               # Add supported_versions extension listing all TLS/SSL versions
               # from the one specified in $tls_low_byte to SSLv3.
               for (( i=0x$tls_low_byte; i >= 0; i=i-1 )); do
                    if [[ 0x$i -eq 4 ]]; then
                         # FIXME: The ClientHello currently indicates support
                         # for drafts 18, 19, 20, and 21 of TLSv1.3 in addition
                         # to the final version of TLSv1.3. In the future, the
                         # draft versions should be removed.
                         extension_supported_versions+=", 03, 04, 7f, 15, 7f, 14, 7f, 13, 7f, 12"
                    else
                         extension_supported_versions+=", 03, $(printf "%02x" $i)"
                    fi
               done
               [[ -n "$all_extensions" ]] && all_extensions+=","
               # FIXME: Adjust the lengths ("+11" and "+10") when the draft versions of TLSv1.3 are removed.
               all_extensions+="00, 2b, 00, $(printf "%02x" $((2*0x$tls_low_byte+11))), $(printf "%02x" $((2*0x$tls_low_byte+10)))$extension_supported_versions"
          fi
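          # Worked example: with tls_low_byte=04 the list is 0304, 7f15, 7f14, 7f13, 7f12, 0303,
          # 0302, 0301, 0300 = 18 bytes, so the list length byte is 0x12 (= 2*4+10) and the
          # extension length is 0x13 (= 2*4+11).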
          if [[ ! "$extra_extensions_list" =~ " 0023 " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extension_session_ticket"
          fi

          # If the ClientHello will include the ALPN extension, then don't include the NPN extension.
          if [[ ! "$extra_extensions_list" =~ " 3374 " ]] && [[ ! "$extra_extensions_list" =~ " 0010 " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extension_next_protocol"
          fi

          # RFC 5246 says that clients MUST NOT offer the signature algorithms
          # extension if they are offering TLS versions prior to 1.2.
          if [[ "0x$tls_low_byte" -ge "0x03" ]] && [[ ! "$extra_extensions_list" =~ " 000d " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extension_signature_algorithms"
          fi

          if [[ -n "$extension_supported_groups" ]] && [[ ! "$extra_extensions_list" =~ " 000a " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extension_supported_groups"
          fi

          if [[ -n "$extensions_key_share" ]] && [[ ! "$extra_extensions_list" =~ " 0028 " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extensions_key_share"
          fi

          if [[ -n "$extension_supported_point_formats" ]] && [[ ! "$extra_extensions_list" =~ " 000b " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extension_supported_point_formats"
          fi

          if [[ -n "$extra_extensions" ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extra_extensions"
          fi
          # Make sure that a non-empty extension goes last (either heartbeat or padding).
          # See PR #792 and https://www.ietf.org/mail-archive/web/tls/current/msg19720.html.
          if [[ ! "$extra_extensions_list" =~ " 000f " ]]; then
               [[ -n "$all_extensions" ]] && all_extensions+=","
               all_extensions+="$extension_heartbeat"
          fi

          code2network "$all_extensions"     # convert extensions
          all_extensions="$NW_STR"           # we don't have the leading \x here so string length is two byte less, see next
          len_extension=${#all_extensions}
          len_extension+=2
          len_extension=$len_extension/4
          len_extension_hex=$(printf "%02x\n" $len_extension)

          # If the length of the Client Hello would be between 256 and 511 bytes,
          # then add a padding extension (see RFC 7685)
          len_all=$((0x$len_ciph_suites + 0x2b + 0x$len_extension_hex + 0x2))
" $offer_compression " && len_all += 2
2016-11-03 15:49:27 +01:00
if [ [ $len_all -ge 256 ] ] && [ [ $len_all -le 511 ] ] && [ [ ! " $extra_extensions_list " = ~ " 0015 " ] ] ; then
2016-05-03 22:48:42 +02:00
if [ [ $len_all -gt 508 ] ] ; then
                    len_padding_extension=1     # Final extension cannot be empty: see PR #792
               else
                    len_padding_extension=$((508 - 0x$len_ciph_suites - 0x2b - 0x$len_extension_hex - 0x2))
               fi
               len_padding_extension_hex=$(printf "%02x\n" $len_padding_extension)
               len2twobytes "$len_padding_extension_hex"
               all_extensions="$all_extensions\\x00\\x15\\x${LEN_STR:0:2}\\x${LEN_STR:4:2}"
               for (( i=0; i<len_padding_extension; i++ )); do
                    all_extensions="$all_extensions\\x00"
               done
               len_extension=$len_extension+$len_padding_extension+0x4
               len_extension_hex=$(printf "%02x\n" $len_extension)
          fi
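          # Worked example (illustrative, NULL-only compression): if the ClientHello would otherwise
          # be 400 bytes, len_padding_extension becomes 108, so the 4-byte padding extension header
          # plus 108 zero bytes brings the message up to the 512 bytes targeted by RFC 7685.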
          len2twobytes "$len_extension_hex"
          all_extensions="
          ,$LEN_STR           # first the len of all extensions.
          ,$all_extensions"
     fi

     # RFC 3546 doesn't specify SSLv3 to have SNI, openssl just ignores the switch if supplied
     if [[ "$tls_low_byte" == "00" ]]; then
          len_all=$((0x$len_ciph_suites + 0x27))
     else
          len_all=$((0x$len_ciph_suites + 0x27 + 0x$len_extension_hex + 0x2))
     fi
" $offer_compression " && len_all += 2
len2twobytes $( printf "%02x\n" $len_all )
2015-09-17 15:30:15 +02:00
len_client_hello_word = " $LEN_STR "
#[[ $DEBUG -ge 3 ]] && echo $len_client_hello_word
if [ [ " $tls_low_byte " = = "00" ] ] ; then
          len_all=$((0x$len_ciph_suites + 0x2b))
     else
          len_all=$((0x$len_ciph_suites + 0x2b + 0x$len_extension_hex + 0x2))
     fi
" $offer_compression " && len_all += 2
len2twobytes $( printf "%02x\n" $len_all )
2015-09-17 15:30:15 +02:00
len_all_word = " $LEN_STR "
#[[ $DEBUG -ge 3 ]] && echo $len_all_word
2015-10-11 23:07:16 +02:00
# if we have SSLv3, the first occurence of TLS protocol -- record layer -- is SSLv3, otherwise TLS 1.0
[ [ $tls_low_byte = = "00" ] ] && tls_word_reclayer = "03, 00"
2015-09-17 15:30:15 +02:00
2017-10-03 22:10:09 +02:00
[ [ 0x$tls_legacy_version -ge 0x04 ] ] && tls_legacy_version = "03"
if " $offer_compression " ; then
# See http://www.iana.org/assignments/comp-meth-ids/comp-meth-ids.xhtml#comp-meth-ids-2
2017-03-25 13:23:21 +01:00
compression_methods = "03,01,40,00" # Offer NULL, DEFLATE, and LZS compression
     else
          compression_methods="01,00"           # Only offer NULL compression (0x00)
     fi

     TLS_CLIENT_HELLO="
     # TLS header (5 bytes)
     ,16, $tls_word_reclayer       # TLS Version: in wireshark this is always 01 for TLS 1.0-1.2
     ,$len_all_word                # Length  <---
     # Handshake header:
     ,01                           # Type (x01 for ClientHello)
     ,00, $len_client_hello_word   # Length ClientHello
     ,03, $tls_legacy_version      # TLS version ClientHello
     ,54, 51, 1e, 7a               # Unix time since  see www.moserware.com/2009/06/first-few-milliseconds-of-https.html
     ,de, ad, be, ef               # Random 28 bytes
     ,31, 33, 07, 00, 00, 00, 00, 00
     ,cf, bd, 39, 04, cc, 16, 0a, 85
     ,03, 90, 9f, 77, 04, 33, d4, de
     ,00                           # Session ID length
     ,$len_ciph_suites_word        # Cipher suites length
     ,$cipher_suites
     ,$compression_methods"

     fd_socket 5 || return 6

     code2network "$TLS_CLIENT_HELLO$all_extensions"
     data="$NW_STR"
     [[ "$DEBUG" -ge 4 ]] && echo "\"$data\""
     printf -- "$data" >&5 2>/dev/null &
     sleep $USLEEP_SND

     return 0
}

# arg1: The server's response
# arg2: CIPHER_SUITES string (lowercase, and in the format output by code2network())
# arg3: (optional) additional request extensions
# arg4: "all" - process full response (including Certificate and certificate_status handshake messages)
#       "ephemeralkey" - extract the server's ephemeral key (if any)
# Return 0 if the response is not a HelloRetryRequest.
# Return 1 if the response is a malformed HelloRetryRequest or if a new ClientHello cannot be sent.
# Return 2 if the response is a HelloRetryRequest, and sending a new ClientHello succeeded.
# Return 6 if the response is a HelloRetryRequest, and sending a new ClientHello failed.
resend_if_hello_retry_request() {
     local tls_hello_ascii="$1"
     local cipher_list_2send="$2"
     local process_full="$4"
     local tls_low_byte server_version cipher_suite rfc_cipher_suite
     local -i i j msg_len tls_hello_ascii_len
     local -i extns_offset hrr_extns_len extra_extensions_len len_extn
     local extra_extensions extn_type part2 new_extra_extns="" new_key_share temp

     tls_hello_ascii_len=${#tls_hello_ascii}

     # A HelloRetryRequest is at least 13 bytes long
     [[ $tls_hello_ascii_len -lt 26 ]] && return 0
     # A HelloRetryRequest is a handshake message (16) with a major record version of 03.
     [[ "${tls_hello_ascii:0:4}" != "1603" ]] && return 0
     # The handshake type for hello_retry_request is 06.
     [[ "${tls_hello_ascii:10:2}" != "06" ]] && return 0

     # This appears to be a HelloRetryRequest message.
     debugme echo "reading hello retry request... "
     if [[ "$DEBUG" -ge 4 ]]; then
          hexdump -C $SOCK_REPLY_FILE | head -6
          echo
     fi

     # Check the length of the handshake message
     msg_len=2*$(hex2dec "${tls_hello_ascii:6:4}")
     if [[ $msg_len -ne $tls_hello_ascii_len-10 ]]; then
          debugme echo "malformed HelloRetryRequest"
          return 1
     fi
     # Check the length of the HelloRetryRequest message.
     msg_len=2*$(hex2dec "${tls_hello_ascii:12:6}")
     if [[ $msg_len -ne $tls_hello_ascii_len-18 ]]; then
          debugme echo "malformed HelloRetryRequest"
          return 1
     fi

     server_version="${tls_hello_ascii:18:4}"
     if [[ "$server_version" == "0304" ]] || [[ 0x$server_version -ge 0x7f13 ]]; then
          # Starting with TLSv1.3 draft 19, a HelloRetryRequest is at least 15 bytes long
          [[ $tls_hello_ascii_len -lt 30 ]] && return 0
          cipher_suite="${tls_hello_ascii:22:2},${tls_hello_ascii:24:2}"
          extns_offset=26
     else
          extns_offset=22
     fi

     # Check the length of the extensions.
     hrr_extns_len=2*$(hex2dec "${tls_hello_ascii:extns_offset:4}")
     if [[ $hrr_extns_len -ne $tls_hello_ascii_len-$extns_offset-4 ]]; then
          debugme echo "malformed HelloRetryRequest"
          return 1
     fi

     if [[ "${server_version:0:2}" == "7F" ]]; then
          tls_low_byte="04"
     else
          tls_low_byte="${server_version:2:2}"
     fi
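     # For example, a draft-21 HRR announces server_version 7F15; any 7FXX draft version is
     # treated as TLSv1.3 here (tls_low_byte "04"), while the final version announces 0304.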
     if [[ $DEBUG -ge 3 ]]; then
          echo "TLS message fragments:"
          echo "     tls_protocol (reclyr):  0x${tls_hello_ascii:2:4}"
          echo "     tls_content_type:       0x16 (handshake)"
          echo "     msg_len:                $(hex2dec "${tls_hello_ascii:6:4}")"
          echo
          echo "TLS handshake message:"
          echo "     handshake type:         0x06 (hello_retry_request)"
          echo "     msg_len:                $(hex2dec "${tls_hello_ascii:12:6}")"
          echo
          echo "TLS hello retry request message:"
          echo "     server version:         $server_version"
          if [[ "$server_version" == "0304" ]] || [[ 0x$server_version -ge 0x7f13 ]]; then
               echo -n "     cipher suite:           $cipher_suite"
               if [[ $TLS_NR_CIPHERS -ne 0 ]]; then
                    if [[ "${cipher_suite:0:2}" == "00" ]]; then
                         rfc_cipher_suite="$(show_rfc_style "x${cipher_suite:3:2}")"
                    else
                         rfc_cipher_suite="$(show_rfc_style "x${cipher_suite:0:2}${cipher_suite:3:2}")"
                    fi
               else
                    rfc_cipher_suite="$($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL' 2>/dev/null | grep -i " 0x${cipher_suite:0:2},0x${cipher_suite:3:2} " | awk '{ print $3 }')"
               fi
               if [[ -n "$rfc_cipher_suite" ]]; then
                    echo " ($rfc_cipher_suite)"
               else
                    echo ""
               fi
          fi
     fi

     # Parse HelloRetryRequest extensions
     for (( i=extns_offset+4; i < tls_hello_ascii_len; i=i+8+$len_extn )); do
          extn_type="${tls_hello_ascii:i:4}"
          j=$i+4
          len_extn=2*$(hex2dec "${tls_hello_ascii:j:4}")
          j+=4
          if [[ $len_extn -gt $tls_hello_ascii_len-$j ]]; then
               debugme echo "malformed HelloRetryRequest"
               return 1
          fi
          # If the HRR includes a cookie extension, then it needs to be
          # included in the next ClientHello.
          if [[ "$extn_type" == "002C" ]]; then
               j=8+$len_extn
               new_extra_extns+="${tls_hello_ascii:i:j}"
          fi
          # If the HRR includes a key_share extension, then it specifies the
          # group to be used in the next ClientHello. So, create a key_share
          # extension that specifies this group.
          if [[ "$extn_type" == "0028" ]]; then
               if [[ $len_extn -ne 4 ]]; then
                    debugme echo "malformed key share extension in HelloRetryRequest"
                    return 1
               fi
               [[ $DEBUG -ge 3 ]] && echo "     key share:              0x${tls_hello_ascii:j:4}"
               new_key_share="$(generate_key_share_extension "000a00040002${tls_hello_ascii:j:4}" "$process_full")"
               [[ $? -ne 0 ]] && return 1
               [[ -z "$new_key_share" ]] && return 1
               new_extra_extns+="${new_key_share//,/}"
          fi
     done
     debugme echo ""

     if [[ -n "$new_extra_extns" ]]; then
          temp="$new_extra_extns"
          extra_extensions_len=${#temp}
          new_extra_extns=""
          for (( i=0; i < extra_extensions_len; i=i+2 )); do
               new_extra_extns+=",${temp:i:2}"
          done
          new_extra_extns="${new_extra_extns:1}"
     fi

     # Include any extra extensions that were included in the first ClientHello,
     # except key_share and cookie.
     extra_extensions="$(strip_spaces "$(tolower "$3")")"
     extra_extensions_len=${#extra_extensions}
     for (( i=0; i < extra_extensions_len; i=i+12+$len_extn )); do
          part2=$i+3
          extn_type="${extra_extensions:i:2}${extra_extensions:part2:2}"
          j=$i+6
          part2=$j+3
          len_extn=3*$(hex2dec "${extra_extensions:j:2}${extra_extensions:part2:2}")
          if [[ "$extn_type" != "0028" ]] && [[ "$extn_type" != "002c" ]]; then
               j=12+$len_extn
               new_extra_extns+=",${extra_extensions:i:j}"
          fi
     done

     debugme echo -en "\nsending second client hello... "
     socksend_tls_clienthello "$tls_low_byte" "$cipher_list_2send" "$process_full" "$new_extra_extns"
     if [[ $? -ne 0 ]]; then
          debugme echo "stuck on sending: $ret"
          return 6
     fi
     sockread_serverhello 32768
     return 2
}
2016-01-23 19:18:33 +01:00
# arg1: TLS version low byte
2015-06-22 18:32:40 +02:00
# (00: SSLv3, 01: TLS 1.0, 02: TLS 1.1, 03: TLS 1.2)
# arg2: (optional) list of cipher suites
# arg3: (optional): "all" - process full response (including Certificate and certificate_status handshake messages)
# "ephemeralkey" - extract the server's ephemeral key (if any)
2016-11-03 15:49:27 +01:00
# arg4: (optional) additional request extensions
# arg5: (optional) "true" if ClientHello should advertise compression methods other than "NULL"
2017-11-07 22:07:30 +01:00
# arg6: (optional) "false" if the connection should not be closed before the function returns.
2017-02-23 17:19:52 +01:00
# return: 0: successful connect | 1: protocol or cipher not available | 2: as (0) but downgraded
# 6: couldn't open socket | 7: couldn't open temp file
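# Illustrative sketch (not taken from a real caller): probe for TLS 1.2 with the default
# cipher list and ask for the full response to be parsed; the return code tells the caller
# whether the protocol was spoken, downgraded, or not available:
#
#      tls_sockets "03" "" "all"
#      case $? in
#           0)     ;;     # TLS 1.2 negotiated
#           2)     ;;     # server downgraded to 0x$DETECTED_TLS_VERSION
#           1|6|7) ;;     # not available / socket problem / temp file problem
#      esac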
2015-05-17 22:43:53 +02:00
tls_sockets() {
     local -i ret=0
     local -i save=0
     local lines
     local tls_low_byte
     local cipher_list_2send
     local sock_reply_file2 sock_reply_file3
     local tls_hello_ascii next_packet
     local process_full="$3" offer_compression=false skip=false
     local close_connection=true
     local -i hello_done=0
2015-09-17 15:30:15 +02:00
[ [ " $5 " = = "true" ] ] && offer_compression = true
2017-11-07 22:07:30 +01:00
[ [ " $6 " = = "false" ] ] && close_connection = false
2015-09-17 15:30:15 +02:00
tls_low_byte = " $1 "
if [ [ -n " $2 " ] ] ; then # use supplied string in arg2 if there is one
cipher_list_2send = " $2 "
else # otherwise use std ciphers then
if [ [ " $tls_low_byte " = = "03" ] ] ; then
cipher_list_2send = " $TLS12_CIPHER "
else
cipher_list_2send = " $TLS_CIPHER "
fi
fi
2017-07-11 21:10:40 +02:00
code2network " $( tolower " $cipher_list_2send " ) " # convert CIPHER_SUITES to a "standardized" format
cipher_list_2send = " $NW_STR "
2015-09-17 15:30:15 +02:00
2017-07-26 22:37:50 +02:00
debugme echo -en "\nsending client hello... "
2017-10-03 22:10:09 +02:00
socksend_tls_clienthello " $tls_low_byte " " $cipher_list_2send " " $process_full " " $4 " " $offer_compression "
2015-09-17 15:30:15 +02:00
ret = $? # 6 means opening socket didn't succeed, e.g. timeout
# if sending didn't succeed we don't bother
if [ [ $ret -eq 0 ] ] ; then
sockread_serverhello 32768
2017-10-31 11:27:19 +01:00
" $TLS_DIFFTIME_SET " && TLS_NOW = $( LC_ALL = C date "+%s" )
2016-11-04 02:54:56 +01:00
          tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
          tls_hello_ascii="${tls_hello_ascii%%[!0-9A-F]*}"

          # Check if the response is a HelloRetryRequest.
          resend_if_hello_retry_request "$tls_hello_ascii" "$cipher_list_2send" "$4" "$process_full"
          ret=$?
          if [[ $ret -eq 2 ]]; then
               tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
               tls_hello_ascii="${tls_hello_ascii%%[!0-9A-F]*}"
          elif [[ $ret -eq 1 ]] || [[ $ret -eq 6 ]]; then
               close_socket
               TMPFILE=$SOCK_REPLY_FILE
               tmpfile_handle $FUNCNAME.dd
               return $ret
          fi

          # The server's response may span more than one packet. If only the
          # first part of the response needs to be processed, this isn't an
          # issue. However, if the entire response needs to be processed or
          # if the ephemeral key is needed (which comes last for TLS 1.2 and
          # below), then we need to check whether the response appears to be
          # complete, and if it isn't, try to get another packet from the server.
if [ [ " $process_full " = = "all" ] ] || [ [ " $process_full " = = "ephemeralkey" ] ] ; then
2017-10-03 22:10:09 +02:00
hello_done = 1; skip = true
fi
          for (( 1; hello_done==1; 1 )); do
               if ! "$skip"; then
                    if [[ $DEBUG -ge 1 ]]; then
                         sock_reply_file2=$(mktemp $TEMPDIR/ddreply.XXXXXX) || return 7
                         mv "$SOCK_REPLY_FILE" "$sock_reply_file2"
                    fi

                    debugme echo -n "requesting more server hello data... "
                    socksend "" $USLEEP_SND
                    sockread_serverhello 32768

                    next_packet=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
                    next_packet="${next_packet%%[!0-9A-F]*}"
                    if [[ ${#next_packet} -eq 0 ]]; then
                         # This shouldn't be necessary. However, it protects against
                         # getting into an infinite loop if the server has nothing
                         # left to send and check_tls_serverhellodone doesn't
                         # correctly catch it.
                         [[ $DEBUG -ge 1 ]] && mv "$sock_reply_file2" "$SOCK_REPLY_FILE"
                         hello_done=0
                    else
                         tls_hello_ascii+="$next_packet"
2017-10-10 20:48:36 +02:00
                         if [[ $DEBUG -ge 1 ]]; then
                              sock_reply_file3=$(mktemp $TEMPDIR/ddreply.XXXXXX) || return 7
                              mv "$SOCK_REPLY_FILE" "$sock_reply_file3"
                              mv "$sock_reply_file2" "$SOCK_REPLY_FILE"
                              cat "$sock_reply_file3" >> "$SOCK_REPLY_FILE"
                              rm "$sock_reply_file3"
                         fi
                    fi
               fi
               skip=false
               if [[ $hello_done -eq 1 ]]; then
                    check_tls_serverhellodone "$tls_hello_ascii" "$process_full"
                    hello_done=$?
                    if [[ "$hello_done" -eq 3 ]]; then
                         # The following three lines are temporary until the code
                         # to decrypt TLSv1.3 responses has been added, at which point
                         # parse_tls_serverhello() will be called with process_full="all"
                         # and parse_tls_serverhello() will populate these files.
                         process_full="ephemeralkey"
                         [[ -e "$HOSTCERT" ]] && rm "$HOSTCERT"
                         [[ -e "$TEMPDIR/intermediatecerts.pem" ]] && rm "$TEMPDIR/intermediatecerts.pem"
                    fi
fi
done
2017-07-26 22:37:50 +02:00
debugme echo "reading server hello..."
2016-03-12 17:08:43 +01:00
if [ [ " $DEBUG " -ge 4 ] ] ; then
2015-09-17 15:30:15 +02:00
hexdump -C $SOCK_REPLY_FILE | head -6
echo
fi
2017-07-11 21:10:40 +02:00
parse_tls_serverhello " $tls_hello_ascii " " $process_full " " $cipher_list_2send "
2015-09-17 15:30:15 +02:00
save = $?
2017-11-07 22:07:30 +01:00
if " $close_connection " && [ [ $save = = 0 ] ] ; then
debugme echo "sending close_notify..."
if [ [ " $DETECTED_TLS_VERSION " = = "0300" ] ] ; then
socksend ",x15, x03, x00, x00, x02, x02, x00" 0
else
socksend ",x15, x03, x01, x00, x02, x02, x00" 0
fi
fi
2017-07-26 22:37:50 +02:00
          if [[ $DEBUG -ge 2 ]]; then
               # see https://secure.wand.net.nz/trac/libprotoident/wiki/SSL
               lines=$(count_lines "$(hexdump -C "$SOCK_REPLY_FILE" 2>$ERRFILE)")
               tm_out "  ($lines lines returned)  "
          fi
2015-09-17 15:30:15 +02:00
          # determine the return value for higher level, so that they can tell what the result is
          if [[ $save -eq 1 ]] || [[ $lines -eq 1 ]]; then
               ret=1          # NOT available
          else
               if [[ 03$tls_low_byte -eq $DETECTED_TLS_VERSION ]]; then
                    ret=0     # protocol available, TLS version returned equal to the one sent
               else
                    debugme echo -n "protocol sent: 0x03$tls_low_byte, returned: 0x$DETECTED_TLS_VERSION"
                    ret=2     # protocol NOT available, server downgraded to $DETECTED_TLS_VERSION
               fi
          fi
fi
2017-07-26 22:37:50 +02:00
debugme echo
2015-09-17 15:30:15 +02:00
else
2017-01-25 14:57:20 +01:00
debugme echo " stuck on sending: $ret "
2015-09-17 15:30:15 +02:00
fi
2017-11-07 22:07:30 +01:00
" $close_connection " && close_socket
2017-04-22 15:39:18 +02:00
tmpfile_handle $FUNCNAME .dd $SOCK_REPLY_FILE
2015-09-17 15:30:15 +02:00
return $ret
2015-05-17 22:43:53 +02:00
}
####### vulnerabilities follow #######
2015-06-22 18:32:40 +02:00
# general overview which browser "supports" which vulnerability:
2015-05-17 22:43:53 +02:00
# http://en.wikipedia.org/wiki/Transport_Layer_Security-SSL#Web_browsers
# mainly adapted from https://gist.github.com/takeshixx/10107280
2015-07-22 13:11:20 +02:00
run_heartbleed() {
     local tls_hexcode
     local heartbleed_payload
     local -i n ret lines_returned
     local append=""
     local tls_hello_ascii=""
     local cve="CVE-2014-0160"
     local cwe="CWE-119"
     local hint=""

     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for heartbleed vulnerability " && outln
     pr_bold " Heartbleed"; out " ($cve)"
2015-05-17 22:43:53 +02:00
2015-10-11 23:07:16 +02:00
[ [ -z " $TLS_EXTENSIONS " ] ] && determine_tls_extensions
2017-10-09 15:13:46 +02:00
if [ [ ! " ${ TLS_EXTENSIONS } " = ~ heartbeat ] ] ; then
2016-03-01 20:39:30 +01:00
pr_done_best "not vulnerable (OK)"
2016-09-06 08:32:05 +02:00
outln ", no heartbeat extension"
2016-11-17 23:27:27 +01:00
fileout "heartbleed" "OK" "Heartbleed: not vulnerable, no heartbeat extension" " $cve " " $cwe "
2015-10-11 23:07:16 +02:00
return 0
fi
2015-09-17 15:30:15 +02:00
2017-10-02 13:48:55 +02:00
     if [[ 0 -eq $(has_server_protocol tls1) ]]; then
          tls_hexcode="x03, x01"
     elif [[ 0 -eq $(has_server_protocol tls1_1) ]]; then
          tls_hexcode="x03, x02"
     elif [[ 0 -eq $(has_server_protocol tls1_2) ]]; then
          tls_hexcode="x03, x03"
     elif [[ 0 -eq $(has_server_protocol ssl3) ]]; then
          tls_hexcode="x03, x00"
     else # no protocol for some reason defined, determine TLS versions offered with a new handshake
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>$ERRFILE </dev/null
          case "$(get_protocol $TMPFILE)" in
               *1.2)  tls_hexcode="x03, x03"; add_tls_offered tls1_2 yes ;;
               *1.1)  tls_hexcode="x03, x02"; add_tls_offered tls1_1 yes ;;
               TLSv1) tls_hexcode="x03, x01"; add_tls_offered tls1 yes ;;
               SSLv3) tls_hexcode="x03, x00"; add_tls_offered ssl3 yes ;;
          esac
     fi
     debugme echo "using protocol $tls_hexcode"

     heartbleed_payload=", x18, $tls_hexcode, x00, x03, x01, x40, x00"
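     # A note on the payload above: x18 is content type 24 (heartbeat), followed by the record
     # layer version, a record length of x00,x03, the heartbeat type x01 (request) and a claimed
     # payload length of x40,x00 (16384 bytes) although no payload is actually sent -- the
     # mismatch is what triggers the over-read on unpatched servers.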
2017-11-07 22:07:30 +01:00
tls_sockets " ${ tls_hexcode : 6 : 2 } " "" "" "" "" "false"
2015-09-17 15:30:15 +02:00
2017-11-07 22:07:30 +01:00
[ [ $DEBUG -ge 4 ] ] && tmln_out " \nsending payload with TLS version $tls_hexcode : "
2017-02-14 19:45:14 +01:00
socksend " $heartbleed_payload " 1
sockread_serverhello 16384 $HEARTBLEED_MAX_WAITSOCK
if [ [ $? -eq 3 ] ] ; then
append = ", timed out"
pr_done_best "not vulnerable (OK)" ; out " $append "
fileout "heartbleed" "OK" " Heartbleed: not vulnerable $append " " $cve " " $cwe "
ret = 0
else
2015-09-17 15:30:15 +02:00
2017-02-14 19:45:14 +01:00
          # server reply should be (>=SSLv3): 18030x in case of a heartBEAT reply -- which we take as a positive result
          tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
          debugme echo "tls_content_type: ${tls_hello_ascii:0:2}"
          debugme echo "tls_protocol: ${tls_hello_ascii:2:4}"
          lines_returned=$(count_lines "$(hexdump -ve '16/1 "%02x " " \n"' "$SOCK_REPLY_FILE")")
          debugme echo "lines HB reply: $lines_returned"
2015-09-17 15:30:15 +02:00
2016-09-06 08:32:05 +02:00
          if [[ $DEBUG -ge 3 ]]; then
               tmln_out "\nheartbleed reply: "
               hexdump -C "$SOCK_REPLY_FILE" | head -20
               [[ $lines_returned -gt 20 ]] && tmln_out "[...]"
               tmln_out
          fi

          if [[ $lines_returned -gt 1 ]] && [[ "${tls_hello_ascii:0:4}" == "1803" ]]; then
2016-09-06 08:32:05 +02:00
if [ [ " $STARTTLS_PROTOCOL " = = "ftp" ] ] || [ [ " $STARTTLS_PROTOCOL " = = "ftps" ] ] ; then
2017-02-14 19:45:14 +01:00
# check possibility of weird vsftpd reply, see #426, despite "1803" seems very unlikely...
if grep -q '500 OOPS' " $SOCK_REPLY_FILE " ; then
append = ", successful weeded out vsftpd false positive"
pr_done_best "not vulnerable (OK)" ; out " $append "
fileout "heartbleed" "OK" " Heartbleed: not vulnerable $append " " $cve " " $cwe "
ret = 0
else
out "likely "
pr_svrty_critical "VULNERABLE (NOT ok)"
2017-04-13 20:28:39 +02:00
[ [ $DEBUG -lt 3 ] ] && tm_out ", use debug >=3 to confirm"
2017-02-14 19:45:14 +01:00
fileout "heartbleed" "CRITICAL" " Heartbleed: VULNERABLE $cve " " $cwe " " $hint "
ret = 1
fi
2016-09-06 08:32:05 +02:00
else
pr_svrty_critical "VULNERABLE (NOT ok)"
2017-02-14 19:45:14 +01:00
fileout "heartbleed" "CRITICAL" " Heartbleed: VULNERABLE $cve " " $cwe " " $hint "
2016-09-06 08:32:05 +02:00
ret = 1
fi
2016-01-23 19:18:33 +01:00
else
2016-09-06 08:32:05 +02:00
pr_done_best "not vulnerable (OK)"
2017-02-14 19:45:14 +01:00
fileout "heartbleed" "OK" " Heartbleed: not vulnerable $cve " " $cwe "
2016-09-06 08:32:05 +02:00
ret = 0
2016-01-23 19:18:33 +01:00
fi
2015-09-17 15:30:15 +02:00
fi
2017-02-14 19:45:14 +01:00
outln
2017-04-22 15:39:18 +02:00
     tmpfile_handle $FUNCNAME.dd $SOCK_REPLY_FILE
2017-02-14 19:45:14 +01:00
close_socket
2015-09-17 15:30:15 +02:00
return $ret
2015-05-17 22:43:53 +02:00
}
# helper function
ok_ids() {
     prln_done_best "\n ok -- something reset our ccs packets"
2015-09-17 15:30:15 +02:00
return 0
2015-05-17 22:43:53 +02:00
}
2017-11-07 22:07:30 +01:00
#FIXME: At a certain point ccs needs to be changed to make use of code2network using a file, then tls_sockets
2015-07-22 13:11:20 +02:00
run_ccs_injection() {
     local tls_hexcode ccs_message client_hello byte6 sockreply
     local -i retval ret
     local tls_hello_ascii=""
     local cve="CVE-2014-0224"
     local cwe="CWE-310"
     local hint=""
2015-09-17 15:30:15 +02:00
# see https://www.openssl.org/news/secadv_20140605.txt
# mainly adapted from Ramon de C Valle's C code from https://gist.github.com/rcvalle/71f4b027d61a78c42607
2015-11-08 22:14:28 +01:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for CCS injection vulnerability " && outln
     pr_bold " CCS"; out " ($cve)"
2015-05-17 22:43:53 +02:00
2017-10-02 13:48:55 +02:00
     if [[ 0 -eq $(has_server_protocol tls1) ]]; then
          tls_hexcode="x03, x01"
     elif [[ 0 -eq $(has_server_protocol tls1_1) ]]; then
          tls_hexcode="x03, x02"
     elif [[ 0 -eq $(has_server_protocol tls1_2) ]]; then
          tls_hexcode="x03, x03"
     elif [[ 0 -eq $(has_server_protocol ssl3) ]]; then
          tls_hexcode="x03, x00"
     else # no protocol for some reason defined, determine TLS versions offered with a new handshake
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>$ERRFILE </dev/null
          case "$(get_protocol $TMPFILE)" in
               *1.2)  tls_hexcode="x03, x03"; add_tls_offered tls1_2 yes ;;
               *1.1)  tls_hexcode="x03, x02"; add_tls_offered tls1_1 yes ;;
               TLSv1) tls_hexcode="x03, x01"; add_tls_offered tls1 yes ;;
               SSLv3) tls_hexcode="x03, x00"; add_tls_offered ssl3 yes ;;
          esac
     fi
     debugme echo "using protocol $tls_hexcode"
2015-09-17 15:30:15 +02:00
     ccs_message=", x14, $tls_hexcode ,x00, x01, x01"
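     # ccs_message is a bare ChangeCipherSpec record: x14 = content type 20 (change_cipher_spec),
     # the record layer version, a length of x00,x01 and the single CCS byte x01 -- sent here
     # before any key material has been negotiated, which is the core of CVE-2014-0224.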
     client_hello="
# TLS header (5 bytes)
2015-10-11 23:07:16 +02:00
,x16, # content type (x16 for handshake)
x03, x01, # TLS version in record layer is always TLS 1.0 (except SSLv3)
x00, x93, # length
2015-09-17 15:30:15 +02:00
# Handshake header
2015-10-11 23:07:16 +02:00
x01, # type (x01 for ClientHello)
x00, x00, x8f, # length
$tls_hexcode , # TLS version
2015-09-17 15:30:15 +02:00
# Random (32 byte)
x53, x43, x5b, x90, x9d, x9b, x72, x0b,
xbc, x0c, xbc, x2b, x92, xa8, x48, x97,
xcf, xbd, x39, x04, xcc, x16, x0a, x85,
x03, x90, x9f, x77, x04, x33, xd4, xde,
x00, # session ID length
x00, x68, # cipher suites length
# Cipher suites (51 suites)
xc0, x13, xc0, x12, xc0, x11, xc0, x10,
xc0, x0f, xc0, x0e, xc0, x0d, xc0, x0c,
xc0, x0b, xc0, x0a, xc0, x09, xc0, x08,
xc0, x07, xc0, x06, xc0, x05, xc0, x04,
xc0, x03, xc0, x02, xc0, x01, x00, x39,
x00, x38, x00, x37, x00, x36, x00, x35, x00, x34,
x00, x33, x00, x32, x00, x31, x00, x30,
x00, x2f, x00, x16, x00, x15, x00, x14,
x00, x13, x00, x12, x00, x11, x00, x10,
x00, x0f, x00, x0e, x00, x0d, x00, x0c,
x00, x0b, x00, x0a, x00, x09, x00, x08,
x00, x07, x00, x06, x00, x05, x00, x04,
x00, x03, x00, x02, x00, x01, x01, x00"
fd_socket 5 || return 6
2015-05-17 22:43:53 +02:00
2015-07-07 22:59:31 +02:00
# we now make a standard handshake ...
2017-07-26 22:37:50 +02:00
debugme echo -n "sending client hello... "
2015-09-17 15:30:15 +02:00
socksend " $client_hello " 1
2017-07-26 22:37:50 +02:00
debugme echo "reading server hello... "
2016-08-31 23:03:50 +02:00
sockread_serverhello 32768
2016-03-12 17:08:43 +01:00
if [ [ $DEBUG -ge 4 ] ] ; then
hexdump -C " $SOCK_REPLY_FILE " | head -20
2017-02-25 16:31:30 +01:00
tmln_out "[...]"
tm_out " \nsending payload #1 with TLS version $tls_hexcode : "
2015-09-17 15:30:15 +02:00
fi
rm " $SOCK_REPLY_FILE "
2015-07-07 22:59:31 +02:00
# ... and then send the a change cipher spec message
2015-09-17 15:30:15 +02:00
socksend " $ccs_message " 1 || ok_ids
2017-02-14 21:56:31 +01:00
sockread_serverhello 4096 $CCS_MAX_WAITSOCK
2015-09-17 15:30:15 +02:00
if [ [ $DEBUG -ge 3 ] ] ; then
2017-02-25 16:31:30 +01:00
tmln_out "\n1st reply: "
2016-08-31 23:03:50 +02:00
hexdump -C " $SOCK_REPLY_FILE " | head -20
2017-02-25 16:31:30 +01:00
tmln_out
tm_out " sending payload #2 with TLS version $tls_hexcode : "
2015-09-17 15:30:15 +02:00
fi
2016-08-31 23:03:50 +02:00
rm " $SOCK_REPLY_FILE "
2015-05-17 22:43:53 +02:00
2015-09-17 15:30:15 +02:00
socksend " $ccs_message " 2 || ok_ids
2017-02-14 21:56:31 +01:00
sockread_serverhello 4096 $CCS_MAX_WAITSOCK
2015-09-17 15:30:15 +02:00
retval = $?
2015-05-17 22:43:53 +02:00
2017-02-14 19:45:14 +01:00
tls_hello_ascii = $( hexdump -v -e '16/1 "%02X"' " $SOCK_REPLY_FILE " )
2017-02-14 21:56:31 +01:00
byte6 = " ${ tls_hello_ascii : 12 : 2 } "
debugme echo " tls_content_type: ${ tls_hello_ascii : 0 : 2 } | tls_protocol: ${ tls_hello_ascii : 2 : 4 } | byte6: $byte6 "
2017-02-14 19:45:14 +01:00
2015-09-17 15:30:15 +02:00
     if [[ $DEBUG -ge 3 ]]; then
          tmln_out "\n2nd reply: "
          hexdump -C "$SOCK_REPLY_FILE"
          tmln_out
fi
2017-02-20 09:44:52 +01:00
# in general, see https://en.wikipedia.org/wiki/Transport_Layer_Security#Alert_protocol
# https://tools.ietf.org/html/rfc5246#section-7.2
2017-02-14 19:45:14 +01:00
#
2017-07-19 18:46:46 +02:00
# not ok for CCSI: 15 | 0301 | 00 02 | 02 15
# ALERT | TLS 1.0 | Length=2 | Decryption failed (21)
2017-02-20 09:44:52 +01:00
#
# ok: nothing: ==> RST
#
# 0A: Unexpected message
# 28: Handshake failure
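     # byte6 (extracted above from hex offset 12) is the alert description: after the 5-byte
     # record header (10 hex chars) and the 1-byte alert level comes the description byte,
     # i.e. the 7th byte of the reply -- hence the name (0-based index 6).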
2017-02-14 21:56:31 +01:00
if [ [ -z " ${ tls_hello_ascii : 0 : 12 } " ] ] ; then
# empty reply
2016-03-01 20:39:30 +01:00
pr_done_best "not vulnerable (OK)"
2016-01-23 19:18:33 +01:00
if [ [ $retval -eq 3 ] ] ; then
2016-11-17 23:27:27 +01:00
fileout "ccs" "OK" "CCS: not vulnerable (timed out)" " $cve " " $cwe "
2016-01-23 19:18:33 +01:00
else
2016-11-17 23:27:27 +01:00
fileout "ccs" "OK" "CCS: not vulnerable" " $cve " " $cwe "
2016-01-23 19:18:33 +01:00
fi
2015-09-17 15:30:15 +02:00
ret = 0
2017-02-14 21:56:31 +01:00
elif [ [ " $byte6 " = = "15" ] ] && [ [ " ${ tls_hello_ascii : 0 : 4 } " = = "1503" ] ] ; then
2017-07-13 01:54:24 +02:00
# decryption failed received
2016-03-01 20:31:26 +01:00
pr_svrty_critical "VULNERABLE (NOT ok)"
2017-02-20 09:44:52 +01:00
fileout "ccs" "CRITICAL" "CCS: VULNERABLE" " $cve " " $cwe " " $hint "
2015-09-17 15:30:15 +02:00
ret = 1
2017-02-20 09:44:52 +01:00
elif [ [ " ${ tls_hello_ascii : 0 : 4 } " = = "1503" ] ] ; then
if [ [ " $byte6 " = = "0A" ] ] || [ [ " $byte6 " = = "28" ] ] ; then
# Unexpected message / Handshake failure received
pr_warning "likely "
out "not vulnerable (OK)"
out " - alert description type: $byte6 "
fileout "ccs" "WARN" " CCS: probably not vulnerable but received 0x ${ byte6 } instead of 0x15 " " $cve " " $cwe " " $hint "
fi
2017-07-13 01:54:24 +02:00
elif [ [ $STARTTLS_PROTOCOL = = "mysql" ] ] && [ [ " ${ tls_hello_ascii : 14 : 12 } " = = "233038533031" ] ] ; then
# MySQL community edition (yaSSL) returns a MySQL error instead of a TLS Alert
# Error: #08S01 Bad handshake
pr_done_best "not vulnerable (OK)"
2017-07-19 18:46:46 +02:00
out ", looks like MySQL community edition (yaSSL)"
fileout "ccs" "OK" "CCS: not vulnerable (MySQL community edition (yaSSL) detected)" " $cve " " $cwe "
2017-02-14 21:56:31 +01:00
elif [ [ " $byte6 " = = [ 0-9a-f] [ 0-9a-f] ] ] && [ [ " ${ tls_hello_ascii : 2 : 2 } " != "03" ] ] ; then
pr_warning "test failed"
out " , probably read buffer too small ( ${ tls_hello_ascii : 0 : 14 } ) "
2017-10-31 12:23:16 +01:00
fileout "ccs" "DEBUG" " CCS: test failed, probably read buffer too small ( ${ tls_hello_ascii : 0 : 14 } ) " " $cve " " $cwe " " $hint "
2017-02-14 21:56:31 +01:00
ret = 7
else
pr_warning "test failed "
2017-02-20 09:44:52 +01:00
out " around line $LINENO (debug info: ${ tls_hello_ascii : 0 : 12 } , $byte6 ) "
2017-10-31 12:23:16 +01:00
fileout "ccs" "DEBUG" " CCS: test failed, around line $LINENO , debug info ( ${ tls_hello_ascii : 0 : 12 } , $byte6 ) " " $cve " " $cwe " " $hint "
2017-02-14 21:56:31 +01:00
ret = 7
2015-09-17 15:30:15 +02:00
fi
outln
2017-04-22 15:39:18 +02:00
tmpfile_handle $FUNCNAME .dd $SOCK_REPLY_FILE
2015-09-17 15:30:15 +02:00
close_socket
return $ret
2015-05-17 22:43:53 +02:00
}
2017-04-18 23:15:32 +02:00
get_session_ticket_tls() {
     local sessticket_tls=""

     #FIXME: we likely have done this already before (either @ run_server_defaults() or at least the output from a previous handshake) --> would save 1x connect
     #ATTENTION: we DO NOT do SNI here as we assume this is a vulnerability of the TLS stack. If we do SNI here, we'd also need to do it in the ClientHello
     # of run_ticketbleed(), otherwise the ticket will be different and the whole thing won't work!
     sessticket_tls="$($OPENSSL s_client $(s_client_options "$BUGS $OPTIMAL_PROTO $PROXY -connect $NODEIP:$PORT") </dev/null 2>$ERRFILE | awk '/TLS session ticket:/,/^$/' | awk '!/TLS session ticket/')"
     sessticket_tls="$(sed -e 's/^.* - /x/g' -e 's/ .*$//g' <<< "$sessticket_tls" | tr '\n' ',')"
     sed -e 's/ /,x/g' -e 's/-/,x/g' <<< "$sessticket_tls"
}
# see https://blog.filippo.io/finding-ticketbleed/ | http://ticketbleed.com/
run_ticketbleed() {
     local session_tckt_tls=""
     local -i len_ch=300                       # fixed len of prepared clienthello below
     local sid="x00,x0B,xAD,xC0,xDE,x00,"      # some arbitrary bytes
     local len_sid="$(( ${#sid} / 4 ))"
     local xlen_sid="$(dec02hex $len_sid)"
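     # Worked example for the sid above: ${#sid} is 24 characters, 4 per byte ("xNN,"),
     # so len_sid=24/4=6 bytes and xlen_sid is that length rendered as two hex digits via dec02hex().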
     local -i len_tckt_tls=0 nr_sid_detected=0
     local xlen_tckt_tls="" xlen_handshake_record_layer="" xlen_handshake_ssl_layer=""
     local -i len_handshake_record_layer=0
     local cve="CVE-2016-9244"
     local cwe="CWE-200"
     local hint=""
     local tls_version=""
     local i
     local -a memory sid_detected
     local early_exit=true
     local ret=0

     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for Ticketbleed vulnerability " && outln
     pr_bold " Ticketbleed"; out " ($cve), experiment. "
2017-06-12 18:23:55 +02:00
if [ [ " $SERVICE " != HTTP ] ] && ! " $CLIENT_AUTH " ; then
2017-05-15 13:18:20 +02:00
outln "-- (applicable only for HTTPS)"
fileout "ticketbleed" "INFO" "Ticketbleed: not applicable, not HTTP" " $cve " " $cwe "
return 0
fi
# highly unlikely that it is NOT supported. We may loose time here but it's more solid
[ [ -z " $TLS_EXTENSIONS " ] ] && determine_tls_extensions
2017-10-09 15:13:46 +02:00
if [ [ ! " ${ TLS_EXTENSIONS } " = ~ "session ticket" ] ] ; then
2017-05-15 13:18:20 +02:00
pr_done_best "not vulnerable (OK)"
outln ", no session ticket extension"
fileout "ticketbleed" "OK" "Ticketbleed: no session ticket extension" " $cve " " $cwe "
return 0
fi
2017-04-18 23:15:32 +02:00
2017-10-02 13:48:55 +02:00
     if [[ 0 -eq $(has_server_protocol tls1) ]]; then
          tls_hexcode="x03, x01"
     elif [[ 0 -eq $(has_server_protocol tls1_1) ]]; then
          tls_hexcode="x03, x02"
     elif [[ 0 -eq $(has_server_protocol tls1_2) ]]; then
          tls_hexcode="x03, x03"
     elif [[ 0 -eq $(has_server_protocol ssl3) ]]; then
          tls_hexcode="x03, x00"
     else # no protocol for some reason defined, determine TLS versions offered with a new handshake
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY") >$TMPFILE 2>$ERRFILE </dev/null
          case "$(get_protocol $TMPFILE)" in
               *1.2)  tls_hexcode="x03, x03"; add_tls_offered tls1_2 yes ;;
               *1.1)  tls_hexcode="x03, x02"; add_tls_offered tls1_1 yes ;;
               TLSv1) tls_hexcode="x03, x01"; add_tls_offered tls1 yes ;;
               SSLv3) tls_hexcode="x03, x00"; add_tls_offered ssl3 yes ;;
          esac
     fi
     debugme echo "using protocol $tls_hexcode"
2017-04-18 23:15:32 +02:00
     session_tckt_tls="$(get_session_ticket_tls)"
     if [[ "$session_tckt_tls" == "," ]]; then
          pr_done_best "not vulnerable (OK)"
          outln ", no session tickets"
          fileout "ticketbleed" "OK" "Ticketbleed: not vulnerable" "$cve" "$cwe"
          debugme echo " session ticket TLS \"$session_tckt_tls\""
          return 0
     fi

     len_tckt_tls=${#session_tckt_tls}
     len_tckt_tls=$((len_tckt_tls / 4))
     xlen_tckt_tls="$(dec02hex $len_tckt_tls)"
     len_handshake_record_layer="$(( len_sid + len_ch + len_tckt_tls ))"
     xlen_handshake_record_layer="$(dec04hex "$len_handshake_record_layer")"
     len_handshake_ssl_layer="$(( len_handshake_record_layer + 4 ))"
     xlen_handshake_ssl_layer="$(dec04hex "$len_handshake_ssl_layer")"
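     # Note on the arithmetic above: lengths are counted in bytes (4 characters "xNN," each);
     # the +4 for len_handshake_ssl_layer accounts for the 4-byte handshake header
     # (1 byte type + 3 bytes length) that precedes the ClientHello body.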
2017-07-26 22:37:50 +02:00
if [ [ " $DEBUG " -ge 4 ] ] ; then
2017-04-18 23:15:32 +02:00
echo " len_tckt_tls (hex): $len_tckt_tls ( $xlen_tckt_tls ) "
echo " sid: $sid "
echo " len_sid (hex) $len_sid ( $xlen_sid ) "
echo " len_handshake_record_layer: $len_handshake_record_layer ( $xlen_handshake_record_layer ) "
echo " len_handshake_ssl_layer: $len_handshake_ssl_layer ( $xlen_handshake_ssl_layer ) "
echo " session_tckt_tls: $session_tckt_tls "
fi
     client_hello="
# TLS header (5 bytes)
,x16, # Content type (x16 for handshake)
2017-05-09 17:29:57 +02:00
x03,x01, # TLS version record layer
2017-04-18 23:15:32 +02:00
# Length Secure Socket Layer follows:
$xlen_handshake_ssl_layer ,
# Handshake header
x01, # Type (x01 for ClientHello)
2017-05-09 17:29:57 +02:00
# Length of ClientHello follows:
2017-04-18 23:15:32 +02:00
x00, $xlen_handshake_record_layer ,
$tls_hexcode , # TLS Version
# Random (32 byte) Unix time etc, see www.moserware.com/2009/06/first-few-milliseconds-of-https.html
xee, xee, x5b, x90, x9d, x9b, x72, x0b,
xbc, x0c, xbc, x2b, x92, xa8, x48, x97,
xcf, xbd, x39, x04, xcc, x16, x0a, x85,
x03, x90, x9f, x77, x04, x33, xff, xff,
$xlen_sid , # Session ID length
$sid
2017-05-09 17:29:57 +02:00
x00, x6a, # Cipher suites length 106
# 53 Cipher suites
xc0,x14, xc0,x13, xc0,x0a, xc0,x21,
x00,x39, x00,x38, x00,x88, x00,x87,
xc0,x0f, xc0,x05, x00,x35, x00,x84,
xc0,x12, xc0,x08, xc0,x1c, xc0,x1b,
x00,x16, x00,x13, xc0,x0d, xc0,x03,
x00,x0a, xc0,x13, xc0,x09, xc0,x1f,
xc0,x1e, x00,x33, x00,x32, x00,x9a,
x00,x99, x00,x45, x00,x44, xc0,x0e,
xc0,x04, x00,x2f, x00,x96, x00,x41,
xc0,x11, xc0,x07, xc0,x0c, xc0,x02,
x00,x05, x00,x04, x00,x15, x00,x12,
xc0,x30, xc0,x2f, x00,x9d, x00,x9c,
x00,x3d, x00,x3c, x00,x9f, x00,x9e,
x00,xff,
2017-04-18 23:15:32 +02:00
x01, # Compression methods length
x00, # Compression method (x00 for NULL)
2017-05-09 17:29:57 +02:00
x01,x5b, # Extensions length ####### 10b + x14 + x3c
# Extension Padding
x00,x15,
# length:
x00,x38,
x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00,
x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00,
x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00, x00,x00,
2017-04-18 23:15:32 +02:00
# Extension: ec_point_formats
2017-05-09 17:29:57 +02:00
x00,x0b,
# length:
x00,x04,
# data:
x03,x00, x01,x02,
2017-04-18 23:15:32 +02:00
# Extension: elliptic_curves
2017-05-09 17:29:57 +02:00
x00,x0a,
# length
x00,x34,
x00,x32,
# data:
x00,x0e, x00,x0d, x00,x19, x00,x0b, x00,x0c,
x00,x18, x00,x09, x00,x0a, x00,x16,
x00,x17, x00,x08, x00,x06, x00,x07,
x00,x14, x00,x15, x00,x04, x00,x05,
x00,x12, x00,x13, x00,x01, x00,x02,
x00,x03, x00,x0f, x00,x10, x00,x11,
# Extension: Signature Algorithms
x00,x0d,
# length:
x00,x10,
# data:
x00,x0e ,x04,x01, x05,x01 ,x02,x01, x04,x03, x05,x03,
x02,x03, x02,x02,
2017-04-18 23:15:32 +02:00
# Extension: SessionTicket TLS
x00, x23,
2017-05-09 17:29:57 +02:00
# length of SessionTicket TLS
2017-04-18 23:15:32 +02:00
x00, $xlen_tckt_tls ,
2017-05-09 17:29:57 +02:00
# data, Session Ticket
2017-04-18 23:15:32 +02:00
$session_tckt_tls # here we have the comma already
# Extension: Heartbeat
x00, x0f, x00, x01, x01"
2017-05-15 19:47:13 +02:00
     # we do 3 client hellos, and see whether different memory is returned
for i in 1 2 3; do
fd_socket 5 || return 6
2017-07-26 22:37:50 +02:00
debugme echo -n "sending client hello... "
2017-05-15 19:47:13 +02:00
socksend " $client_hello " 0
2017-04-18 23:15:32 +02:00
2017-07-26 22:37:50 +02:00
debugme echo "reading server hello (ticketbleed reply)... "
2017-05-15 19:47:13 +02:00
if " $FAST_SOCKET " ; then
tls_hello_ascii = $( sockread_fast 32768)
2017-04-18 23:15:32 +02:00
else
2017-05-15 19:47:13 +02:00
               sockread_serverhello 32768 $CCS_MAX_WAITSOCK
               tls_hello_ascii=$(hexdump -v -e '16/1 "%02X"' "$SOCK_REPLY_FILE")
2017-04-18 23:15:32 +02:00
fi
2017-05-15 19:47:13 +02:00
[ [ " $DEBUG " -ge 5 ] ] && echo " $tls_hello_ascii "
if [ [ " $DEBUG " -ge 4 ] ] ; then
echo "============================="
echo " $tls_hello_ascii "
echo "============================="
2017-04-18 23:15:32 +02:00
fi
2017-05-15 19:47:13 +02:00
if [ [ " ${ tls_hello_ascii : 0 : 2 } " = = "15" ] ] ; then
debugme echo -n " TLS Alert ${ tls_hello_ascii : 10 : 4 } (TLS version: ${ tls_hello_ascii : 2 : 4 } ) -- "
2017-04-18 23:15:32 +02:00
pr_done_best "not vulnerable (OK)"
2017-05-15 19:47:13 +02:00
fileout "ticketbleed" "OK" "Ticketbleed: not vulnerable" " $cve " " $cwe "
break
elif [ [ -z " ${ tls_hello_ascii : 0 : 2 } " ] ] ; then
pr_done_best "not vulnerable (OK)"
out ", reply empty"
fileout "ticketbleed" "OK" "Ticketbleed: not vulnerable" " $cve " " $cwe "
break
elif [ [ " ${ tls_hello_ascii : 0 : 2 } " = = "16" ] ] ; then
early_exit = false
debugme echo -n " Handshake (TLS version: ${ tls_hello_ascii : 2 : 4 } ), "
if [ [ " ${ tls_hello_ascii : 10 : 6 } " = = 020000 ] ] ; then
debugme echo -n "ServerHello -- "
else
debugme echo -n " Message type: ${ tls_hello_ascii : 10 : 6 } -- "
fi
sid_input = $( sed -e 's/x//g' -e 's/,//g' <<< " $sid " )
sid_detected[ i] = " ${ tls_hello_ascii : 88 : 32 } "
memory[ i] = " ${ tls_hello_ascii : $(( 88 + len_sid*2)) : $(( 32 - len_sid*2)) } "
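               # A note on the offsets above (assuming a plain ServerHello as the first message):
               # record header (5 bytes) + handshake header (4) + server version (2) + random (32)
               # + session ID length (1) = 44 bytes = 88 hex chars, so the returned session ID
               # starts at offset 88; memory[] keeps only the part beyond our 6-byte fake sid,
               # i.e. bytes the server should not have echoed back.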
2017-07-26 22:37:50 +02:00
if [ [ " $DEBUG " -ge 3 ] ] ; then
2017-05-15 19:47:13 +02:00
echo
echo " TLS version, record layer: ${ tls_hello_ascii : 18 : 4 } "
echo " Session ID: ${ sid_detected [i] } "
echo " memory: ${ memory [i] } "
echo -n " $sid_input in SID: " ;
2017-10-09 15:13:46 +02:00
[ [ " ${ sid_detected [i] } " = ~ $sid_input ] ] && echo "yes" || echo "no"
2017-05-15 19:47:13 +02:00
fi
[ [ " $DEBUG " -ge 1 ] ] && echo $tls_hello_ascii >$TEMPDIR /$FUNCNAME .tls_hello_ascii${ i } .txt
2017-04-24 09:25:23 +02:00
else
2017-05-15 19:47:13 +02:00
               ret=7
               pr_warning "test failed"
               out " around line $LINENO (debug info: ${tls_hello_ascii:0:2}, ${tls_hello_ascii:2:10})"
               fileout "ticketbleed" "DEBUG" "Ticketbleed: test failed, around $LINENO (debug info: ${tls_hello_ascii:0:2}, ${tls_hello_ascii:2:10})" "$cve" "$cwe"
2017-05-15 19:47:13 +02:00
break
2017-04-24 09:25:23 +02:00
fi
2017-05-15 19:47:13 +02:00
debugme echo "sending close_notify..."
if [ [ ${ tls_hello_ascii : 18 : 4 } = = "0300" ] ] ; then
socksend ",x15, x03, x00, x00, x02, x02, x00" 0
else
socksend ",x15, x03, x01, x00, x02, x02, x00" 0
fi
close_socket
done
2017-04-18 23:15:32 +02:00
2017-05-15 19:47:13 +02:00
     if ! "$early_exit"; then
          # here we test the replies if a TLS server hello was received more than once
          for i in 1 2 3; do
2017-10-09 15:13:46 +02:00
               if [[ "${sid_detected[i]}" =~ $sid_input ]]; then
2017-05-15 19:47:13 +02:00
                    # was our faked TLS SID returned?
                    nr_sid_detected+=1
               fi
          done
          if [[ $nr_sid_detected -eq 3 ]]; then
               if [[ ${memory[1]} != ${memory[2]} ]] && [[ ${memory[2]} != ${memory[3]} ]]; then
                    pr_svrty_critical "VULNERABLE (NOT ok)"
                    fileout "ticketbleed" "CRITICAL" "Ticketbleed: VULNERABLE" "$cve" "$cwe" "$hint"
               else
                    pr_done_best "not vulnerable (OK)"
                    out ", memory fragments do not differ"
                    fileout "ticketbleed" "OK" "Ticketbleed: not vulnerable, session IDs were returned but memory fragments do not differ" "$cve" "$cwe"
               fi
          else
               if [[ "$DEBUG" -ge 2 ]]; then
                    echo
                    pr_warning "test failed, non reproducible results!"
               else
                    pr_warning "test failed, non reproducible results!"
                    out " Please run again w/ \"--debug=2\"  (# of faked TLS SIDs detected: $nr_sid_detected)"
               fi
2017-10-31 12:23:16 +01:00
               fileout "ticketbleed" "DEBUG" "Ticketbleed: # of TLS Session IDs detected: $nr_sid_detected, ${sid_detected[1]}, ${sid_detected[2]}, ${sid_detected[3]}" "$cve" "$cwe"
2017-05-15 19:47:13 +02:00
               ret=7
          fi
2017-04-18 23:15:32 +02:00
     fi
2017-05-15 19:47:13 +02:00
     outln
2017-04-18 23:15:32 +02:00
     return $ret
}
2015-07-22 13:11:20 +02:00
run_renego() {
2015-05-17 22:43:53 +02:00
     # no SNI here. Not needed as there won't be two different SSL stacks for one IP
2015-09-17 15:30:15 +02:00
     local legacycmd=""
     local insecure_renogo_str="Secure Renegotiation IS NOT"
2017-09-19 18:37:03 +02:00
     local sec_renego sec_client_renego
2016-11-17 23:27:27 +01:00
     local cve="CVE-2009-3555"
     local cwe="CWE-310"
2016-11-23 09:46:11 +01:00
     local hint=""
2015-05-17 22:43:53 +02:00
2016-01-23 19:18:33 +01:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for Renegotiation vulnerabilities " && outln
2015-05-17 22:43:53 +02:00
2017-06-20 23:18:15 +02:00
     pr_bold " Secure Renegotiation "; out "($cve) "      # and RFC 5746, OSVDB 59968-59974
2017-08-31 17:22:10 +02:00
# community.qualys.com/blogs/securitylabs/2009/11/05/ssl-and-tls-authentication-gap-vulnerability-discovered
2017-09-19 18:37:03 +02:00
     $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $STARTTLS $BUGS -connect $NODEIP:$PORT $SNI $PROXY") 2>&1 </dev/null >$TMPFILE 2>$ERRFILE
2015-10-11 23:07:16 +02:00
     if sclient_connect_successful $? $TMPFILE; then
2015-09-17 15:30:15 +02:00
          grep -iaq "$insecure_renogo_str" $TMPFILE
          sec_renego=$?                                    # 0=Secure Renegotiation IS NOT supported
2015-05-17 22:43:53 +02:00
          #FIXME: didn't occur to me yet, but why not also check for "Secure Renegotiation IS supported"?
2015-09-17 15:30:15 +02:00
          case $sec_renego in
2017-03-18 22:24:35 +01:00
               0) prln_svrty_critical "VULNERABLE (NOT ok)"
2016-11-17 23:27:27 +01:00
                  fileout "secure_renego" "CRITICAL" "Secure Renegotiation: VULNERABLE" "$cve" "$cwe" "$hint"
2016-01-23 19:18:33 +01:00
                  ;;
2017-03-18 22:24:35 +01:00
               1) prln_done_best "not vulnerable (OK)"
2016-11-17 23:27:27 +01:00
                  fileout "secure_renego" "OK" "Secure Renegotiation: not vulnerable" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
                  ;;
2017-03-18 22:24:35 +01:00
               *) prln_warning "FIXME (bug): $sec_renego"
2016-11-17 23:27:27 +01:00
                  fileout "secure_renego" "WARN" "Secure Renegotiation: FIXME (bug) $sec_renego" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
                  ;;
2015-09-17 15:30:15 +02:00
          esac
     else
2017-02-25 16:31:30 +01:00
          prln_warning "handshake didn't succeed"
2016-11-17 23:27:27 +01:00
          fileout "secure_renego" "WARN" "Secure Renegotiation: handshake didn't succeed" "$cve" "$cwe"
2015-09-17 15:30:15 +02:00
     fi
     pr_bold " Secure Client-Initiated Renegotiation "     # RFC 5746
# see: https://community.qualys.com/blogs/securitylabs/2011/10/31/tls-renegotiation-and-denial-of-service-attacks
# http://blog.ivanristic.com/2009/12/testing-for-ssl-renegotiation.html -- head/get doesn't seem to be needed though
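     # A quick manual check (illustrative sketch, not used by this function; "example.com" is a placeholder):
     # send "R" on an established s_client connection to request a client-initiated renegotiation.
     #
     #   echo R | openssl s_client -connect example.com:443
     #
     # If the server renegotiates without erroring out or dropping the connection, it accepts
     # client-initiated renegotiation -- the DoS angle this check is about.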
     case "$OSSL_VER" in
          0.9.8*)             # we need this for Mac OSX unfortunately
               case "$OSSL_VER_APPENDIX" in
2016-01-23 19:18:33 +01:00
                    [a-l])
2017-08-31 17:22:10 +02:00
                         prln_local_problem "Your $OPENSSL cannot test this secure renegotiation vulnerability"
                         fileout "sec_client_renego" "WARN" "Secure Client-Initiated Renegotiation: your $OPENSSL cannot test this secure renegotiation vulnerability" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
                         return 3
                         ;;
                    [m-z])
                         ;;   # all ok
               esac
               ;;
          1.0.1*|1.0.2*)
               legacycmd="-legacy_renegotiation"
               ;;
          0.9.9*|1.0*)
               ;;   # all ok
2015-09-17 15:30:15 +02:00
     esac
2016-01-23 19:18:33 +01:00
     if "$CLIENT_AUTH"; then
2017-08-31 17:22:10 +02:00
          prln_warning "client x509-based authentication prevents this from being tested"
          fileout "sec_client_renego" "WARN" "Secure Client-Initiated Renegotiation: client x509-based authentication prevents this from being tested"
2015-09-17 15:30:15 +02:00
          sec_client_renego=1
     else
2015-10-11 23:07:16 +02:00
          # We need up to two tries here, as some LiteSpeed servers don't answer on "R" and block. Thus first try in the background
          # -msg enables us to look deeper into it while debugging
2017-09-19 18:37:03 +02:00
          echo R | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS $legacycmd $STARTTLS -msg -connect $NODEIP:$PORT $SNI $PROXY") >$TMPFILE 2>>$ERRFILE &
2015-10-11 23:07:16 +02:00
          wait_kill $! $HEADER_MAXSLEEP
          if [[ $? -eq 3 ]]; then
2016-12-20 14:17:14 +01:00
               pr_done_good "likely not vulnerable (OK)"; outln ", timed out"        # it hung
2016-11-17 23:27:27 +01:00
               fileout "sec_client_renego" "OK" "Secure Client-Initiated Renegotiation: likely not vulnerable (timed out)" "$cve" "$cwe"
2015-10-11 23:07:16 +02:00
               sec_client_renego=1
          else
               # second try in the foreground as we are sure now it won't hang
2017-09-19 18:37:03 +02:00
               echo R | $OPENSSL s_client $(s_client_options "$legacycmd $STARTTLS $BUGS -msg -connect $NODEIP:$PORT $SNI $PROXY") >$TMPFILE 2>>$ERRFILE
2015-10-11 23:07:16 +02:00
               sec_client_renego=$?           # 0=client is renegotiating & doesn't return an error --> vuln!
2016-01-23 19:18:33 +01:00
               case "$sec_client_renego" in
2016-10-10 23:27:34 +02:00
                    0) if [[ $SERVICE == "HTTP" ]]; then
                            pr_svrty_high "VULNERABLE (NOT ok)"; outln ", DoS threat"
2016-11-17 23:27:27 +01:00
                            fileout "sec_client_renego" "HIGH" "Secure Client-Initiated Renegotiation: VULNERABLE, DoS threat" "$cve" "$cwe" "$hint"
2016-10-10 23:27:34 +02:00
                       else
                            pr_svrty_medium "VULNERABLE (NOT ok)"; outln ", potential DoS threat"
2016-11-17 23:27:27 +01:00
                            fileout "sec_client_renego" "MEDIUM" "Secure Client-Initiated Renegotiation: VULNERABLE, potential DoS threat" "$cve" "$cwe" "$hint"
2016-10-10 23:27:34 +02:00
                       fi
2016-01-23 19:18:33 +01:00
                       ;;
                    1)
2017-02-25 16:31:30 +01:00
                       prln_done_good "not vulnerable (OK)"
2016-11-17 23:27:27 +01:00
                       fileout "sec_client_renego" "OK" "Secure Client-Initiated Renegotiation: not vulnerable" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
                       ;;
                    *)
2017-02-25 16:31:30 +01:00
                       prln_warning "FIXME (bug): $sec_client_renego"
2016-11-17 23:27:27 +01:00
                       fileout "sec_client_renego" "DEBUG" "Secure Client-Initiated Renegotiation: FIXME (bug) $sec_client_renego - Please report" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
                       ;;
2015-10-11 23:07:16 +02:00
               esac
          fi
2015-09-17 15:30:15 +02:00
     fi
     #FIXME Insecure Client-Initiated Renegotiation is missing
     tmpfile_handle $FUNCNAME.txt
2017-04-12 21:00:08 +02:00
     return $((sec_renego + sec_client_renego))
2015-05-17 22:43:53 +02:00
     #FIXME: the return value is wrong, should be 0 if all ok. But as the caller doesn't care we don't care either ... yet ;-)
}
2015-07-22 13:11:20 +02:00
run_crime() {
2017-01-05 20:55:08 +01:00
     local -i ret=0 sclient_success
2015-09-17 15:30:15 +02:00
     local addcmd=""
2016-11-17 23:27:27 +01:00
     local cve="CVE-2012-4929"
     local cwe="CWE-310"
2016-11-23 09:46:11 +01:00
     local hint=""
2016-11-17 23:27:27 +01:00
2015-09-17 15:30:15 +02:00
     # in a nutshell: don't offer TLS/SPDY compression on the server side
     # This tests for the CRIME vulnerability (www.ekoparty.org/2012/juliano-rizzo.php) on HTTPS, not SPDY (yet)
2015-05-17 22:43:53 +02:00
     # Please note that it is an attack where you need client-side control, so in regular situations this
2015-09-17 15:30:15 +02:00
     # means anyway "game over", with or without CRIME
     # www.h-online.com/security/news/item/Vulnerability-in-SSL-encryption-is-barely-exploitable-1708604.html
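     # A quick manual check (illustrative sketch, not used by this function; "example.com" is a placeholder):
     # when the local openssl binary has zlib support, the negotiated TLS compression shows up in the
     # s_client output.
     #
     #   openssl s_client -connect example.com:443 </dev/null 2>/dev/null | grep -i '^Compression'
     #
     # "Compression: NONE" means TLS-level compression is off and CRIME (TLS) does not apply.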
2016-01-23 19:18:33 +01:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for CRIME vulnerability " && outln
2016-11-17 23:27:27 +01:00
     pr_bold " CRIME, TLS "; out "($cve) "
2015-09-17 15:30:15 +02:00
     # first we need to test whether the OpenSSL binary has zlib support
     $OPENSSL zlib -e -a -in /dev/stdin &>/dev/stdout </dev/null | grep -q zlib
     if [[ $? -eq 0 ]]; then
2017-01-05 20:55:08 +01:00
          if "$SSL_NATIVE"; then
2017-02-25 16:31:30 +01:00
               prln_local_problem "$OPENSSL lacks zlib support"
2017-01-05 20:55:08 +01:00
               fileout "crime" "WARN" "CRIME, TLS: Not tested. $OPENSSL lacks zlib support" "$cve" "$cwe"
               return 7
          else
               tls_sockets "03" "$TLS12_CIPHER" "" "" "true"
               sclient_success=$?
               [[ $sclient_success -eq 2 ]] && sclient_success=0
               [[ $sclient_success -eq 0 ]] && cp "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" $TMPFILE
          fi
     else
          [[ "$OSSL_VER" == "0.9.8"* ]] && addcmd="-no_ssl2"
2017-01-05 21:45:18 +01:00
          if [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == "1.1.0"* ]] || [[ $OSSL_VER_MAJOR.$OSSL_VER_MINOR == "1.1.1"* ]]; then
               addcmd="-comp"
          fi
2017-09-19 18:37:03 +02:00
          $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS $addcmd $STARTTLS -connect $NODEIP:$PORT $PROXY $SNI") </dev/null &>$TMPFILE
2017-01-05 20:55:08 +01:00
          sclient_connect_successful $? $TMPFILE
          sclient_success=$?
2015-09-17 15:30:15 +02:00
     fi
2017-01-05 20:55:08 +01:00
     if [[ $sclient_success -ne 0 ]]; then
          pr_warning "test failed (couldn't connect)"
          fileout "crime" "WARN" "CRIME, TLS: Check failed. (couldn't connect)" "$cve" "$cwe"
          ret=7
     elif grep -a Compression $TMPFILE | grep -aq NONE >/dev/null; then
2016-03-01 20:36:41 +01:00
          pr_done_good "not vulnerable (OK)"
2017-06-12 18:23:55 +02:00
          if [[ $SERVICE != "HTTP" ]] && ! "$CLIENT_AUTH"; then
2015-10-11 23:07:16 +02:00
               out " (not using HTTP anyway)"
2016-11-17 23:27:27 +01:00
               fileout "crime" "OK" "CRIME, TLS: Not vulnerable (not using HTTP anyway)" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
          else
2016-11-17 23:27:27 +01:00
               fileout "crime" "OK" "CRIME, TLS: Not vulnerable" "$cve" "$cwe"
2015-10-11 23:07:16 +02:00
          fi
2015-09-17 15:30:15 +02:00
          ret=0
     else
          if [[ $SERVICE == "HTTP" ]]; then
2016-03-01 20:25:41 +01:00
               pr_svrty_high "VULNERABLE (NOT ok)"
2016-11-17 23:27:27 +01:00
               fileout "crime" "HIGH" "CRIME, TLS: VULNERABLE" "$cve" "$cwe" "$hint"
2015-09-17 15:30:15 +02:00
          else
2016-05-27 17:43:45 +02:00
               pr_svrty_medium "VULNERABLE but not using HTTP: probably no exploit known"
2016-11-17 23:27:27 +01:00
               fileout "crime" "MEDIUM" "CRIME, TLS: VULNERABLE, but not using HTTP: probably no exploit known" "$cve" "$cwe" "$hint"
2015-09-17 15:30:15 +02:00
          fi
          ret=1
     fi
2015-10-11 23:07:16 +02:00
# not clear whether this is a protocol != HTTP as one needs to have the ability to repeatedly modify the input
# which is done e.g. via javascript in the context of HTTP
2015-09-17 15:30:15 +02:00
outln
2015-05-17 22:43:53 +02:00
     # this needs to be re-done in order to remove the redundant check for spdy
2015-09-17 15:30:15 +02:00
# weed out starttls, spdy-crime is a web thingy
# if [[ "x$STARTTLS" != "x" ]]; then
# echo
# return $ret
# fi
     # weed out non-webports, spdy-crime is a web thingy. there's a catch though, you see it?
# case $PORT in
# 25|465|587|80|110|143|993|995|21)
# echo
# return $ret
# esac
2016-09-21 21:42:45 +02:00
# if "$HAS_NPN"; then
2015-09-17 15:30:15 +02:00
# $OPENSSL s_client -host $NODE -port $PORT -nextprotoneg $NPN_PROTOs $SNI </dev/null 2>/dev/null >$TMPFILE
# if [[ $? -eq 0 ]]; then
# echo
2016-11-17 23:27:27 +01:00
# pr_bold "CRIME Vulnerability, SPDY " ; outln "($cve): "
2015-09-17 15:30:15 +02:00
# STR=$(grep Compression $TMPFILE )
# if echo $STR | grep -q NONE >/dev/null; then
2016-03-01 20:39:30 +01:00
# pr_done_best "not vulnerable (OK)"
2015-09-17 15:30:15 +02:00
# ret=$((ret + 0))
# else
2016-03-01 20:31:26 +01:00
# pr_svrty_critical "VULNERABLE (NOT ok)"
2015-09-17 15:30:15 +02:00
# ret=$((ret + 1))
# fi
# fi
# fi
2017-02-25 16:31:30 +01:00
# [[ $DEBUG -eq 2 ]] tmln_out "$STR"
2015-09-17 15:30:15 +02:00
     tmpfile_handle $FUNCNAME.txt
     return $ret
2015-05-17 22:43:53 +02:00
}
# BREACH is an attack against HTTP-level compression. It works against any cipher suite and is agnostic
2015-05-29 19:56:57 +02:00
# to the version of TLS/SSL, more: http://www.breachattack.com/ . Foreign referrers are the important thing here!
2015-10-15 14:15:07 +02:00
# Mitigation: see https://community.qualys.com/message/20360
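# A quick manual spot check (illustrative sketch, not used by this script; "example.com" and the URL
# are placeholders): request a page while offering compression in the request headers and look whether
# the response carries a Content-Encoding header.
#
#   printf 'GET / HTTP/1.1\r\nHost: example.com\r\nAccept-Encoding: gzip,deflate\r\nConnection: close\r\n\r\n' | \
#        openssl s_client -quiet -connect example.com:443 2>/dev/null | grep -i '^Content-Encoding'
#
# If a Content-Encoding line comes back, the body was compressed by the server and BREACH may apply
# (provided the page reflects user input and contains secrets).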
2015-07-22 13:11:20 +02:00
run_breach() {
2017-09-19 18:37:03 +02:00
     local header
2015-09-17 15:30:15 +02:00
     local -i ret=0
2015-09-28 22:54:00 +02:00
     local -i was_killed=0
2015-09-17 15:30:15 +02:00
     local referer useragent
     local url
2015-10-13 22:25:01 +02:00
     local spaces=" "
     local disclaimer=""
2016-02-07 19:13:59 +01:00
     local when_makesense=" Can be ignored for static pages or if no secrets in the page"
2016-11-17 23:27:27 +01:00
     local cve="CVE-2013-3587"
     local cwe="CWE-310"
2016-11-23 09:46:11 +01:00
     local hint=""
2015-09-17 15:30:15 +02:00
2017-09-01 16:13:32 +02:00
     [[ $SERVICE != "HTTP" ]] && ! "$CLIENT_AUTH" && return 7
2015-09-17 15:30:15 +02:00
2015-10-15 14:15:07 +02:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for BREACH (HTTP compression) vulnerability " && outln
2016-11-17 23:27:27 +01:00
     pr_bold " BREACH"; out " ($cve) "
2017-06-12 18:23:55 +02:00
     if "$CLIENT_AUTH"; then
2017-10-31 12:23:16 +01:00
          outln "cannot be tested (server side requires x509 authentication)"
2017-08-31 17:22:10 +02:00
          fileout "breach" "INFO" "BREACH: cannot be tested (server side requires x509 authentication)" "$cve" "$cwe"
          return 7
2017-06-12 18:23:55 +02:00
     fi
2015-09-17 15:30:15 +02:00
     url="$1"
     [[ -z "$url" ]] && url="/"
2015-10-13 22:25:01 +02:00
     disclaimer=" - only supplied \"$url\" tested"
2015-10-11 23:07:16 +02:00
     referer="https://google.com/"
2015-10-15 14:15:07 +02:00
     [[ "$NODE" =~ google ]] && referer="https://yandex.ru/"     # otherwise we have a false positive for google.com
2015-10-11 23:07:16 +02:00
     useragent="$UA_STD"
2015-10-15 14:15:07 +02:00
     $SNEAKY && useragent="$UA_SNEAKY"
2017-09-19 18:37:03 +02:00
     printf "GET $url HTTP/1.1\r\nHost: $NODE\r\nUser-Agent: $useragent\r\nReferer: $referer\r\nConnection: Close\r\nAccept-encoding: gzip,deflate,compress\r\nAccept: text/*\r\n\r\n" | $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -quiet -ign_eof -connect $NODEIP:$PORT $PROXY $SNI") 1>$TMPFILE 2>$ERRFILE &
2015-09-28 22:54:00 +02:00
     wait_kill $! $HEADER_MAXSLEEP
2015-10-11 23:07:16 +02:00
     was_killed=$?                 # !=0 was killed
     result=$(awk '/^Content-Encoding/ { print $2 }' $TMPFILE)
2015-09-28 22:54:00 +02:00
     result=$(strip_lf "$result")
2015-10-11 23:07:16 +02:00
     debugme grep '^Content-Encoding' $TMPFILE
     if [[ ! -s $TMPFILE ]]; then
2016-03-05 21:07:49 +01:00
          pr_warning "failed (HTTP header request stalled"
2016-01-23 19:18:33 +01:00
          if [[ $was_killed -ne 0 ]]; then
2016-03-05 21:07:49 +01:00
               pr_warning " and was terminated"
2016-11-17 23:27:27 +01:00
               fileout "breach" "WARN" "BREACH: Test failed (HTTP request stalled and was terminated)" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
          else
2016-11-17 23:27:27 +01:00
               fileout "breach" "WARN" "BREACH: Test failed (HTTP request stalled)" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
          fi
2017-02-25 16:31:30 +01:00
          prln_warning ") "
2015-09-17 15:30:15 +02:00
          ret=3
2015-09-28 22:54:00 +02:00
     elif [[ -z $result ]]; then
2016-03-01 20:39:30 +01:00
          pr_done_best "no HTTP compression (OK) "
2015-10-13 22:25:01 +02:00
          outln "$disclaimer"
2016-11-17 23:27:27 +01:00
          fileout "breach" "OK" "BREACH: no HTTP compression $disclaimer" "$cve" "$cwe"
2015-10-13 22:25:01 +02:00
          ret=0
2015-09-28 22:54:00 +02:00
     else
2016-03-01 20:25:41 +01:00
          pr_svrty_high "potentially NOT ok, uses $result HTTP compression."
2015-10-13 22:25:01 +02:00
          outln "$disclaimer"
2016-01-23 19:18:33 +01:00
          outln "$spaces$when_makesense"
2017-04-25 16:32:06 +02:00
          fileout "breach" "HIGH" "BREACH: potentially VULNERABLE, uses $result HTTP compression. $disclaimer ($when_makesense)" "$cve" "$cwe" "$hint"
2016-01-23 19:18:33 +01:00
          ret=1
2015-09-17 15:30:15 +02:00
     fi
2015-09-28 22:54:00 +02:00
     # Any URL can be vulnerable. I am testing now only the given URL!
     tmpfile_handle $FUNCNAME.txt
2015-09-17 15:30:15 +02:00
     return $ret
2015-05-17 22:43:53 +02:00
}
2017-08-31 17:22:10 +02:00
# SWEET32 (https://sweet32.info/). Birthday attacks on 64-bit block ciphers.
# In a nutshell: don't use 3DES ciphers anymore (DES, RC2 and IDEA too)
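# A quick manual check (illustrative sketch, not part of this script; "example.com" is a placeholder):
# offer only 64-bit block ciphers and see whether the handshake succeeds. "3DES", "DES", "RC2" and
# "IDEA" are standard OpenSSL cipher-string aliases.
#
#   openssl s_client -cipher '3DES:DES:RC2:IDEA' -connect example.com:443 </dev/null
#
# A completed handshake (a cipher shows up in the output) means the server still accepts such ciphers.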
2017-02-02 14:42:06 +01:00
run_sweet32() {
2017-10-11 21:41:05 +02:00
     local -i sclient_success=1
2017-02-02 14:42:06 +01:00
     # DES, RC2 and IDEA are missing
     local sweet32_ciphers="ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-3DES-EDE-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:AECDH-DES-CBC3-SHA:ADH-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:DES-CBC3-MD5:RSA-PSK-3DES-EDE-CBC-SHA:PSK-3DES-EDE-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-DES-CBC3-MD5:ECDHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA"
     local sweet32_ciphers_hex="c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,17, 00,1b, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, fe,ff, ff,e0"
     # proper parsing to be clarified: 07,00,c0
2017-10-11 21:41:05 +02:00
     local proto
2017-02-02 14:42:06 +01:00
     local cve="CVE-2016-2183, CVE-2016-6329"
     local cwe="CWE-327"
     local hint=""
     local -i nr_sweet32_ciphers=0
     local using_sockets=true
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for SWEET32 (Birthday Attacks on 64-bit Block Ciphers) " && outln
     pr_bold " SWEET32"; out " ($cve) "
     "$SSL_NATIVE" && using_sockets=false
     # The openssl binary distributed has almost everything we need (PSK, KRB5 ciphers and feff, ffe0 are typically missing).
     # Measurements show that there's little impact whether we use sockets or TLS here, so the default is sockets here
     if "$using_sockets"; then
2017-10-11 21:41:05 +02:00
          for proto in 03 02 01 00; do
               "$FAST" && [[ "$proto" != "03" ]] && break
               ! "$FAST" && [[ $(has_server_protocol "$proto") -eq 1 ]] && continue
               tls_sockets "$proto" "${sweet32_ciphers_hex}"
               sclient_success=$?
               [[ $sclient_success -eq 2 ]] && sclient_success=0
               [[ $sclient_success -eq 0 ]] && break
          done
2017-02-02 14:42:06 +01:00
     else
          nr_sweet32_ciphers=$(count_ciphers $sweet32_ciphers)
          nr_supported_ciphers=$(count_ciphers $(actually_supported_ciphers $sweet32_ciphers))
2017-10-11 21:41:05 +02:00
          for proto in -no_ssl2 -tls1_1 -tls1 -ssl3; do
               ! "$HAS_SSL3" && [[ "$proto" == "-ssl3" ]] && continue
               if [[ "$proto" != "-no_ssl2" ]]; then
                    "$FAST" && break
                    [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue
               fi
               $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS $proto -cipher $sweet32_ciphers -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
               sclient_connect_successful $? $TMPFILE
               sclient_success=$?
               [[ $DEBUG -eq 2 ]] && egrep -q "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
               [[ $sclient_success -eq 0 ]] && break
          done
2017-02-02 14:42:06 +01:00
     fi
     if [[ $sclient_success -eq 0 ]]; then
2017-02-03 13:03:22 +01:00
          pr_svrty_low "VULNERABLE"; out ", uses 64 bit block ciphers"
2017-02-02 14:42:06 +01:00
          fileout "sweet32" "LOW" "SWEET32, uses 64 bit block ciphers" "$cve" "$cwe" "$hint"
     else
          pr_done_best "not vulnerable (OK)";
          if "$using_sockets"; then
               fileout "sweet32" "OK" "SWEET32: not vulnerable" "$cve" "$cwe"
          else
               if [[ "$nr_supported_ciphers" -ge 17 ]]; then
                    # Likely only PSK/KRB5 ciphers are missing: display discrepancy but no warning
                    out ", $nr_supported_ciphers/$nr_sweet32_ciphers local ciphers"
               else
                    pr_warning ", $nr_supported_ciphers/$nr_sweet32_ciphers local ciphers"
               fi
               fileout "sweet32" "OK" "SWEET32: not vulnerable ($nr_supported_ciphers of $nr_sweet32_ciphers local ciphers)" "$cve" "$cwe"
          fi
     fi
     outln
     tmpfile_handle $FUNCNAME.txt
     return $sclient_success
}
2015-05-29 19:44:27 +02:00
# Padding Oracle On Downgraded Legacy Encryption, in a nutshell: don't use CBC Ciphers in SSLv3
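# A quick manual check (illustrative sketch, not part of this script; "example.com" is a placeholder):
# try an SSLv3-only handshake -- if it succeeds with a CBC cipher, the server is POODLE-affected.
# Note that -ssl3 requires a local OpenSSL build that still supports SSLv3.
#
#   openssl s_client -ssl3 -connect example.com:443 </dev/null 2>/dev/null | grep -E 'Protocol|Cipher'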
2015-07-22 13:11:20 +02:00
run_ssl_poodle() {
2015-10-11 23:07:16 +02:00
     local -i sclient_success=0
2016-12-21 16:36:09 +01:00
     local cbc_ciphers="ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:SRP-DSS-AES-256-CBC-SHA:SRP-RSA-AES-256-CBC-SHA:SRP-AES-256-CBC-SHA:DHE-PSK-AES256-CBC-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DH-RSA-AES256-SHA:DH-DSS-AES256-SHA:DHE-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:AECDH-AES256-SHA:ADH-AES256-SHA:ADH-CAMELLIA256-SHA:ECDH-RSA-AES256-SHA:ECDH-ECDSA-AES256-SHA:AES256-SHA:ECDHE-PSK-AES256-CBC-SHA:CAMELLIA256-SHA:RSA-PSK-AES256-CBC-SHA:PSK-AES256-CBC-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:SRP-DSS-AES-128-CBC-SHA:SRP-RSA-AES-128-CBC-SHA:SRP-AES-128-CBC-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DH-RSA-AES128-SHA:DH-DSS-AES128-SHA:DHE-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DH-RSA-SEED-SHA:DH-DSS-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:AECDH-AES128-SHA:ADH-AES128-SHA:ADH-SEED-SHA:ADH-CAMELLIA128-SHA:ECDH-RSA-AES128-SHA:ECDH-ECDSA-AES128-SHA:AES128-SHA:ECDHE-PSK-AES128-CBC-SHA:DHE-PSK-AES128-CBC-SHA:SEED-SHA:CAMELLIA128-SHA:IDEA-CBC-SHA:RSA-PSK-AES128-CBC-SHA:PSK-AES128-CBC-SHA:KRB5-IDEA-CBC-SHA:KRB5-IDEA-CBC-MD5:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-3DES-EDE-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:AECDH-DES-CBC3-SHA:ADH-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:RSA-PSK-3DES-EDE-CBC-SHA:PSK-3DES-EDE-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-DES-CBC3-MD5:ECDHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:ADH-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:KRB5-DES-CBC-SHA:KRB5-DES-CBC-MD5:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-ADH-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-KRB5-RC2-CBC-SHA:EXP-KRB5-DES-CBC-SHA:EXP-KRB5-RC2-CBC-MD5:EXP-KRB5-DES-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA"
     local cbc_ciphers_hex="c0,14, c0,0a, c0,22, c0,21, c0,20, 00,91, 00,39, 00,38, 00,37, 00,36, 00,88, 00,87, 00,86, 00,85, c0,19, 00,3a, 00,89, c0,0f, c0,05, 00,35, c0,36, 00,84, 00,95, 00,8d, c0,13, c0,09, c0,1f, c0,1e, c0,1d, 00,33, 00,32, 00,31, 00,30, 00,9a, 00,99, 00,98, 00,97, 00,45, 00,44, 00,43, 00,42, c0,18, 00,34, 00,9b, 00,46, c0,0e, c0,04, 00,2f, c0,35, 00,90, 00,96, 00,41, 00,07, 00,94, 00,8c, 00,21, 00,25, c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,17, 00,1b, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,1a, 00,62, 00,09, 00,1e, 00,22, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e"
2016-11-17 23:27:27 +01:00
     local cve="CVE-2014-3566"
     local cwe="CWE-310"
2016-11-23 09:46:11 +01:00
     local hint=""
2017-01-21 16:52:02 +01:00
     local -i nr_cbc_ciphers=0
2016-12-21 16:36:09 +01:00
     local using_sockets=true
2015-09-17 15:30:15 +02:00
2016-01-23 19:18:33 +01:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for SSLv3 POODLE (Padding Oracle On Downgraded Legacy Encryption) " && outln
2016-11-17 23:27:27 +01:00
     pr_bold " POODLE, SSL"; out " ($cve) "
2015-10-11 23:07:16 +02:00
2017-01-21 16:52:02 +01:00
     "$SSL_NATIVE" && using_sockets=false
     # The openssl binary distributed has almost everything we need (PSK and KRB5 ciphers are typically missing).
     # Measurements show that there's little impact whether we use sockets or TLS here, so the default is sockets here
2016-12-21 16:36:09 +01:00
     if "$using_sockets"; then
          tls_sockets "00" "$cbc_ciphers_hex"
          sclient_success=$?
     else
2017-01-21 16:52:02 +01:00
          if ! "$HAS_SSL3"; then
2017-02-25 16:31:30 +01:00
               prln_local_problem "Your $OPENSSL doesn't support SSLv3"
2017-01-21 16:52:02 +01:00
               return 1
          fi
          nr_cbc_ciphers=$(count_ciphers $cbc_ciphers)
          nr_supported_ciphers=$(count_ciphers $(actually_supported_ciphers $cbc_ciphers))
2017-02-02 14:42:06 +01:00
          # SNI not needed as SSLv3 has none:
2016-12-21 16:36:09 +01:00
          $OPENSSL s_client -ssl3 $STARTTLS $BUGS -cipher $cbc_ciphers -connect $NODEIP:$PORT $PROXY >$TMPFILE 2>$ERRFILE </dev/null
          sclient_connect_successful $? $TMPFILE
          sclient_success=$?
          [[ "$DEBUG" -eq 2 ]] && egrep -q "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
     fi
2015-10-11 23:07:16 +02:00
     if [[ $sclient_success -eq 0 ]]; then
2017-07-25 10:54:01 +02:00
          POODLE=0
2016-03-01 20:25:41 +01:00
          pr_svrty_high "VULNERABLE (NOT ok)"; out ", uses SSLv3+CBC (check TLS_FALLBACK_SCSV mitigation below)"
2016-11-17 23:27:27 +01:00
          fileout "poodle_ssl" "HIGH" "POODLE, SSL: VULNERABLE, uses SSLv3+CBC" "$cve" "$cwe" "$hint"
2015-09-17 15:30:15 +02:00
     else
2017-07-25 10:54:01 +02:00
          POODLE=1
2017-01-21 16:52:02 +01:00
          pr_done_best "not vulnerable (OK)";
2017-01-23 11:33:18 +01:00
          if "$using_sockets"; then
               fileout "poodle_ssl" "OK" "POODLE, SSL: not vulnerable" "$cve" "$cwe"
2017-01-21 16:52:02 +01:00
          else
2017-01-23 11:33:18 +01:00
               if [[ "$nr_supported_ciphers" -ge 83 ]]; then
                    # Likely only KRB and PSK ciphers are missing: display discrepancy but no warning
                    out ", $nr_supported_ciphers/$nr_cbc_ciphers local ciphers"
               else
                    pr_warning ", $nr_supported_ciphers/$nr_cbc_ciphers local ciphers"
               fi
               fileout "poodle_ssl" "OK" "POODLE, SSL: not vulnerable ($nr_supported_ciphers of $nr_cbc_ciphers local ciphers)" "$cve" "$cwe"
2017-01-21 16:52:02 +01:00
          fi
2015-09-17 15:30:15 +02:00
     fi
     outln
     tmpfile_handle $FUNCNAME.txt
2015-10-11 23:07:16 +02:00
     return $sclient_success
2015-05-17 22:43:53 +02:00
}
2015-05-29 19:56:57 +02:00
# for appliances which use padding, no fallback needed
2015-07-22 13:11:20 +02:00
run_tls_poodle() {
2016-11-17 23:27:27 +01:00
     local cve="CVE-2014-8730"
     local cwe="CWE-310"
     pr_bold " POODLE, TLS"; out " ($cve), experimental "
2015-09-17 15:30:15 +02:00
     #FIXME
2017-02-25 16:31:30 +01:00
     prln_warning "#FIXME"
2016-11-17 23:27:27 +01:00
     fileout "poodle_tls" "WARN" "POODLE, TLS: Not tested. Not yet implemented #FIXME" "$cve" "$cwe"
2015-09-17 15:30:15 +02:00
     return 7
2015-05-17 22:43:53 +02:00
}
2015-07-22 13:11:20 +02:00
run_tls_fallback_scsv() {
2015-09-17 15:30:15 +02:00
     local -i ret=0
2016-01-23 19:18:33 +01:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for TLS_FALLBACK_SCSV Protection " && outln
2017-02-03 11:47:21 +01:00
     pr_bold " TLS_FALLBACK_SCSV"; out " (RFC 7507) "
2015-09-17 15:30:15 +02:00
     # This isn't a vulnerability check per se, but checks for the existence of
     # the countermeasure to protect against protocol downgrade attacks.
     # First check we have support for TLS_FALLBACK_SCSV in our local OpenSSL
2016-09-21 21:42:45 +02:00
     if ! "$HAS_FALLBACK_SCSV"; then
2017-02-25 16:31:30 +01:00
          prln_local_problem "$OPENSSL lacks TLS_FALLBACK_SCSV support"
2015-09-17 15:30:15 +02:00
          return 4
     fi
     #TODO: this needs some tuning: a) if only one protocol is supported, it has practically no value (theoretically it's interesting though)
     # b) for IIS6 + openssl 1.0.2 this won't work
     # c) best to make sure that we hit a specific protocol, see https://alpacapowered.wordpress.com/2014/10/20/ssl-poodle-attack-what-is-this-scsv-thingy/
# d) minor: we should do "-state" here
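     # A quick manual check (illustrative sketch, not used here; "example.com" is a placeholder): signal a
     # downgraded handshake and watch for the alert that a protecting server sends back.
     #
     #   openssl s_client -connect example.com:443 -no_tls1_2 -fallback_scsv </dev/null
     #
     # "alert inappropriate fallback" in the output means TLS_FALLBACK_SCSV is honored.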
2016-04-29 23:04:01 +02:00
     # first: make sure SSLv3 or some TLS protocol is supported
     if [[ "$OPTIMAL_PROTO" == "-ssl2" ]]; then
2017-02-25 16:31:30 +01:00
          prln_svrty_critical "No fallback possible, SSLv2 is the only protocol"
2016-04-29 23:04:01 +02:00
          return 7
     fi
     # second: make sure we have tls1_2:
2017-09-19 18:37:03 +02:00
     $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI -no_tls1_2") >$TMPFILE 2>$ERRFILE </dev/null
2015-10-11 23:07:16 +02:00
     if ! sclient_connect_successful $? $TMPFILE; then
2016-03-01 20:36:41 +01:00
          pr_done_good "No fallback possible, TLS 1.2 is the only protocol (OK)"
2015-09-17 15:30:15 +02:00
          ret=7
     else
2015-10-11 23:07:16 +02:00
          # ...and do the test (we need to parse the error here!)
2017-09-19 18:37:03 +02:00
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI -no_tls1_2 -fallback_scsv") &>$TMPFILE </dev/null
2015-09-17 15:30:15 +02:00
          if grep -q "CONNECTED(00" "$TMPFILE"; then
               if grep -qa "BEGIN CERTIFICATE" "$TMPFILE"; then
2017-07-25 10:54:01 +02:00
                    if [[ -z "$POODLE" ]]; then
                         pr_warning "Rerun including POODLE SSL check. "
                         pr_svrty_medium "Downgrade attack prevention NOT supported"
2017-08-28 20:54:08 +02:00
                         fileout "fallback_scsv" "WARN" "TLS_FALLBACK_SCSV (RFC 7507): Downgrade attack prevention NOT supported. Please rerun with the POODLE SSL check"
2017-07-25 10:54:01 +02:00
                         ret=1
                    elif [[ "$POODLE" -eq 0 ]]; then
                         pr_svrty_high "Downgrade attack prevention NOT supported and vulnerable to POODLE SSL"
                         fileout "fallback_scsv" "HIGH" "TLS_FALLBACK_SCSV (RFC 7507): Downgrade attack prevention NOT supported and vulnerable to POODLE SSL"
                         ret=0
                    else
                         pr_svrty_medium "Downgrade attack prevention NOT supported"
                         fileout "fallback_scsv" "MEDIUM" "TLS_FALLBACK_SCSV (RFC 7507): Downgrade attack prevention NOT supported"
                         ret=1
                    fi
2015-09-17 15:30:15 +02:00
               elif grep -qa "alert inappropriate fallback" "$TMPFILE"; then
2016-03-01 20:36:41 +01:00
                    pr_done_good "Downgrade attack prevention supported (OK)"
2016-11-17 23:27:27 +01:00
                    fileout "fallback_scsv" "OK" "TLS_FALLBACK_SCSV (RFC 7507) (experimental): Downgrade attack prevention supported"
2015-09-17 15:30:15 +02:00
                    ret=0
               elif grep -qa "alert handshake failure" "$TMPFILE"; then
2017-02-03 11:47:21 +01:00
                    pr_done_good "Probably OK. "
                    fileout "fallback_scsv" "OK" "TLS_FALLBACK_SCSV (RFC 7507) (experimental): Probably OK"
2015-09-17 15:30:15 +02:00
                    # see RFC 7507, https://github.com/drwetter/testssl.sh/issues/121
2017-02-03 11:47:21 +01:00
                    # other case reported by Nicolas was F5, and at a customer of mine: the same
                    pr_svrty_medium "But received non-RFC-compliant \"handshake failure\" instead of \"inappropriate fallback\""
                    fileout "fallback_scsv" "MEDIUM" "TLS_FALLBACK_SCSV (RFC 7507) (experimental): But received non-RFC-compliant \"handshake failure\" instead of \"inappropriate fallback\""
2015-09-17 15:30:15 +02:00
                    ret=2
               elif grep -qa "ssl handshake failure" "$TMPFILE"; then
2016-05-27 17:43:45 +02:00
                    pr_svrty_medium "some unexpected \"handshake failure\" instead of \"inappropriate fallback\""
                    fileout "fallback_scsv" "MEDIUM" "TLS_FALLBACK_SCSV (RFC 7507) (experimental): some unexpected \"handshake failure\" instead of \"inappropriate fallback\" (likely: warning)"
2015-09-17 15:30:15 +02:00
                    ret=3
               else
2016-03-05 21:07:49 +01:00
                    pr_warning "Check failed, unexpected result"
2015-09-17 15:30:15 +02:00
                    out ", run $PROG_NAME -Z --debug=1 and look at $TEMPDIR/*tls_fallback_scsv.txt"
2016-01-23 23:33:17 +01:00
                    fileout "fallback_scsv" "WARN" "TLS_FALLBACK_SCSV (RFC 7507) (experimental): Check failed, unexpected result, run $PROG_NAME -Z --debug=1 and look at $TEMPDIR/*tls_fallback_scsv.txt"
2015-09-17 15:30:15 +02:00
               fi
          else
2016-03-05 21:07:49 +01:00
               pr_warning "test failed (couldn't connect)"
2016-01-23 23:33:17 +01:00
               fileout "fallback_scsv" "WARN" "TLS_FALLBACK_SCSV (RFC 7507) (experimental): Check failed. (couldn't connect)"
2015-09-17 15:30:15 +02:00
               ret=7
          fi
     fi
     outln
     tmpfile_handle $FUNCNAME.txt
     return $ret
2015-06-11 21:41:25 +02:00
}
2015-05-27 23:31:25 +02:00
2015-05-17 22:43:53 +02:00
# Factoring RSA Export Keys: don't use EXPORT RSA ciphers, see https://freakattack.com/
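# A quick manual check (illustrative sketch, not part of this script; "example.com" is a placeholder):
# offer only export-grade ciphers ("EXPORT" is a standard OpenSSL cipher-string alias) and see whether
# the handshake succeeds.
#
#   openssl s_client -cipher 'EXPORT' -connect example.com:443 </dev/null
#
# A completed handshake means the server still accepts export-grade RSA and is FREAK-prone.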
2015-07-22 13:11:20 +02:00
run_freak() {
2015-10-11 23:07:16 +02:00
     local -i sclient_success=0
2017-01-04 16:31:13 +01:00
     local -i i nr_supported_ciphers=0 len
2016-12-21 19:16:10 +01:00
     # with a correct build it should list these 9 ciphers (plus the latter two as SSLv2 ciphers):
     local exportrsa_cipher_list="EXP1024-DES-CBC-SHA:EXP1024-RC2-CBC-MD5:EXP1024-RC4-SHA:EXP1024-RC4-MD5:EXP-EDH-RSA-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-RC4-MD5"
     local exportrsa_tls_cipher_list_hex="00,62, 00,61, 00,64, 00,60, 00,14, 00,0E, 00,08, 00,06, 00,03"
     local exportrsa_ssl2_cipher_list_hex="04,00,80, 02,00,80"
2017-01-04 16:31:13 +01:00
     local detected_ssl2_ciphers
2017-07-03 22:24:02 +02:00
     local addtl_warning="" hexc
2016-11-17 23:27:27 +01:00
     local cve="CVE-2015-0204"
     local cwe="CWE-310"
2016-11-23 09:46:11 +01:00
     local hint=""
2016-12-21 19:16:10 +01:00
     local using_sockets=true
2016-01-23 19:18:33 +01:00
     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for FREAK attack " && outln
2016-11-17 23:27:27 +01:00
     pr_bold " FREAK"; out " ($cve) "
2015-09-17 15:30:15 +02:00
2017-01-21 16:52:02 +01:00
     "$SSL_NATIVE" && using_sockets=false
2016-12-21 19:16:10 +01:00
     if "$using_sockets"; then
          nr_supported_ciphers=$(count_words "$exportrsa_tls_cipher_list_hex")+$(count_words "$exportrsa_ssl2_cipher_list_hex")
     else
          nr_supported_ciphers=$(count_ciphers $(actually_supported_ciphers $exportrsa_cipher_list))
     fi
2015-09-17 15:30:15 +02:00
     #echo "========= ${PIPESTATUS[*]}
2015-09-21 14:03:48 +02:00
     case $nr_supported_ciphers in
2017-02-25 16:31:30 +01:00
          0)   prln_local_problem "$OPENSSL doesn't have any EXPORT RSA ciphers configured"
2016-11-17 23:27:27 +01:00
               fileout "freak" "WARN" "FREAK: Not tested. $OPENSSL doesn't have any EXPORT RSA ciphers configured" "$cve" "$cwe"
2016-01-23 19:18:33 +01:00
               return 7
               ;;
2015-09-17 15:30:15 +02:00
          1|2|3)
2015-09-21 14:03:48 +02:00
               addtl_warning=" ($magenta""tested with only $nr_supported_ciphers out of 9 ciphers!$off)" ;;
2015-09-17 15:30:15 +02:00
          4|5|6|7)
2015-09-21 14:03:48 +02:00
               addtl_warning=" (tested with $nr_supported_ciphers/9 ciphers)" ;;
2017-01-21 16:52:02 +01:00
          8|9|10|11)
               addtl_warning="" ;;
2015-09-17 15:30:15 +02:00
     esac
2016-12-21 19:16:10 +01:00
     if "$using_sockets"; then
          tls_sockets "03" "$exportrsa_tls_cipher_list_hex"
          sclient_success=$?
          [[ $sclient_success -eq 2 ]] && sclient_success=0
          if [[ $sclient_success -ne 0 ]]; then
2017-01-04 16:31:13 +01:00
               sslv2_sockets "$exportrsa_ssl2_cipher_list_hex" "true"
               if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
                    exportrsa_ssl2_cipher_list_hex="$(strip_spaces "${exportrsa_ssl2_cipher_list_hex//,/}")"
                    len=${#exportrsa_ssl2_cipher_list_hex}
                    detected_ssl2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
                    for ((i=0; i<len; i=i+6)); do
                         [[ "$detected_ssl2_ciphers" =~ "x${exportrsa_ssl2_cipher_list_hex:i:6}" ]] && sclient_success=0 && break
                    done
               fi
2016-12-21 19:16:10 +01:00
fi
else
2017-07-03 22:24:02 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS $BUGS -cipher $exportrsa_cipher_list -connect $NODEIP : $PORT $PROXY $SNI -no_ssl2 " ) >$TMPFILE 2>$ERRFILE </dev/null
2016-12-21 19:16:10 +01:00
sclient_connect_successful $? $TMPFILE
sclient_success = $?
2017-07-26 22:37:50 +02:00
debugme egrep -a "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
2016-12-21 19:16:10 +01:00
if [ [ $sclient_success -ne 0 ] ] && " $HAS_SSL2 " ; then
$OPENSSL s_client $STARTTLS $BUGS -cipher $exportrsa_cipher_list -connect $NODEIP :$PORT $PROXY -ssl2 >$TMPFILE 2>$ERRFILE </dev/null
sclient_connect_successful $? $TMPFILE
sclient_success = $?
fi
fi
2015-10-11 23:07:16 +02:00
if [ [ $sclient_success -eq 0 ] ] ; then
2016-03-01 20:31:26 +01:00
pr_svrty_critical "VULNERABLE (NOT ok)" ; out ", uses EXPORT RSA ciphers"
2016-11-17 23:27:27 +01:00
fileout "freak" "CRITICAL" "FREAK: VULNERABLE, uses EXPORT RSA ciphers" " $cve " " $cwe " " $hint "
2015-09-17 15:30:15 +02:00
else
2016-03-01 20:39:30 +01:00
pr_done_best "not vulnerable (OK)" ; out " $addtl_warning "
2016-11-17 23:27:27 +01:00
fileout "freak" "OK" " FREAK: not vulnerable $addtl_warning " " $cve " " $cwe "
2015-09-17 15:30:15 +02:00
fi
outln
2016-12-21 19:16:10 +01:00
if [ [ $DEBUG -ge 2 ] ] ; then
if " $using_sockets " ; then
for hexc in $( sed 's/, / /g' <<< " $exportrsa_tls_cipher_list_hex , $exportrsa_ssl2_cipher_list_hex " ) ; do
if [ [ ${# hexc } -eq 5 ] ] ; then
hexc = " 0x ${ hexc : 0 : 2 } ,0x ${ hexc : 3 : 2 } "
else
hexc = " 0x ${ hexc : 0 : 2 } ,0x ${ hexc : 3 : 2 } ,0x ${ hexc : 6 : 2 } "
fi
for ( ( i = 0; i < TLS_NR_CIPHERS; i++ ) ) ; do
[ [ " $hexc " = = " ${ TLS_CIPHER_HEXCODE [i] } " ] ] && break
done
2017-02-25 16:31:30 +01:00
[ [ $i -eq $TLS_NR_CIPHERS ] ] && tm_out " $hexc " || tm_out " ${ TLS_CIPHER_OSSL_NAME [i] } "
2016-12-21 19:16:10 +01:00
done
2017-02-25 16:31:30 +01:00
tmln_out
2016-12-21 19:16:10 +01:00
else
echo $( actually_supported_ciphers $exportrsa_cipher_list )
fi
fi
2015-09-21 14:03:48 +02:00
debugme echo $nr_supported_ciphers
2015-09-17 15:30:15 +02:00
tmpfile_handle $FUNCNAME .txt
return $ret
2015-05-17 22:43:53 +02:00
}
2017-01-21 16:52:02 +01:00
# see https://weakdh.org/logjam.html
2015-07-22 13:11:20 +02:00
run_logjam( ) {
2015-10-11 23:07:16 +02:00
local -i sclient_success = 0
2017-01-18 22:05:27 +01:00
local exportdh_cipher_list = "EXP1024-DHE-DSS-DES-CBC-SHA:EXP1024-DHE-DSS-RC4-SHA:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA"
local exportdh_cipher_list_hex = "00,63, 00,65, 00,14, 00,11"
2017-01-21 16:52:02 +01:00
local all_dh_ciphers = "cc,15, 00,b3, 00,91, c0,97, 00,a3, 00,9f, cc,aa, c0,a3, c0,9f, 00,6b, 00,6a, 00,39, 00,38, 00,c4, 00,c3, 00,88, 00,87, 00,a7, 00,6d, 00,3a, 00,c5, 00,89, 00,ab, cc,ad, c0,a7, c0,43, c0,45, c0,47, c0,53, c0,57, c0,5b, c0,67, c0,6d, c0,7d, c0,81, c0,85, c0,91, 00,a2, 00,9e, c0,a2, c0,9e, 00,aa, c0,a6, 00,67, 00,40, 00,33, 00,32, 00,be, 00,bd, 00,9a, 00,99, 00,45, 00,44, 00,a6, 00,6c, 00,34, 00,bf, 00,9b, 00,46, 00,b2, 00,90, c0,96, c0,42, c0,44, c0,46, c0,52, c0,56, c0,5a, c0,66, c0,6c, c0,7c, c0,80, c0,84, c0,90, 00,66, 00,18, 00,8e, 00,16, 00,13, 00,1b, 00,8f, 00,63, 00,15, 00,12, 00,1a, 00,65, 00,14, 00,11, 00,19, 00,17, 00,b5, 00,b4, 00,2d" # 93 ciphers
2017-01-19 14:45:19 +01:00
local -i i nr_supported_ciphers = 0 server_key_exchange_len = 0 ephemeral_pub_len = 0 len_dh_p = 0
2016-12-21 20:55:01 +01:00
local addtl_warning = "" hexc
2016-11-17 23:27:27 +01:00
local cve = "CVE-2015-4000"
local cwe = "CWE-310"
2016-11-23 09:46:11 +01:00
local hint = ""
2017-02-14 16:18:27 +01:00
local server_key_exchange ephemeral_pub key_bitstring = ""
2017-01-19 14:45:19 +01:00
local dh_p = ""
2017-01-18 15:53:01 +01:00
local spaces = " "
2017-01-18 22:05:27 +01:00
local vuln_exportdh_ciphers = false
local common_primes_file = " $TESTSSL_INSTALL_DIR /etc/common-primes.txt "
2017-01-19 14:45:19 +01:00
local comment = "" str = ""
2017-01-18 22:05:27 +01:00
local -i lineno_matched = 0
local -i ret
2017-01-19 14:45:19 +01:00
local using_sockets = true
2015-09-17 15:30:15 +02:00
2016-01-23 19:18:33 +01:00
[ [ $VULN_COUNT -le $VULN_THRESHLD ] ] && outln && pr_headlineln " Testing for LOGJAM vulnerability " && outln
2016-11-17 23:27:27 +01:00
pr_bold " LOGJAM" ; out " ( $cve ), experimental "
2015-09-17 15:30:15 +02:00
2017-01-21 16:52:02 +01:00
" $SSL_NATIVE " && using_sockets = false
2017-01-21 18:08:31 +01:00
# Also, as the distributed openssl binary has everything we need, measurements show that
2017-01-21 16:52:02 +01:00
# there's no impact whether we use sockets or TLS here, so the default is sockets here
2017-01-19 14:45:19 +01:00
if ! " $using_sockets " ; then
2017-01-18 22:05:27 +01:00
nr_supported_ciphers = $( count_ciphers $( actually_supported_ciphers $exportdh_cipher_list ) )
2017-01-19 14:45:19 +01:00
debugme echo $nr_supported_ciphers
case $nr_supported_ciphers in
2017-02-25 16:31:30 +01:00
0) prln_local_problem " $OPENSSL doesn't have any DH EXPORT ciphers configured "
2017-01-19 14:45:19 +01:00
fileout "logjam" "WARN" " LOGJAM: Not tested. $OPENSSL doesn't have any DH EXPORT ciphers configured " " $cve " " $cwe "
2017-01-21 16:52:02 +01:00
return 1 # we could continue here testing common primes but the logjam test would be not complete and it's misleading/hard to code+display
2017-01-19 14:45:19 +01:00
; ;
2017-01-21 16:52:02 +01:00
1| 2| 3) addtl_warning = " ( $magenta " " tested w/ $nr_supported_ciphers /4 ciphers only! $off ) " ; ;
2017-01-19 14:45:19 +01:00
4) ; ;
esac
2016-12-21 20:55:01 +01:00
fi
2015-09-17 15:30:15 +02:00
2017-01-18 22:05:27 +01:00
# test for DH export ciphers first
2016-12-21 20:55:01 +01:00
if " $using_sockets " ; then
2017-01-18 22:05:27 +01:00
tls_sockets "03" " $exportdh_cipher_list_hex "
2016-12-21 20:55:01 +01:00
sclient_success = $?
[ [ $sclient_success -eq 2 ] ] && sclient_success = 0
else
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS $BUGS -cipher $exportdh_cipher_list -connect $NODEIP : $PORT $PROXY $SNI " ) >$TMPFILE 2>$ERRFILE </dev/null
2016-12-21 20:55:01 +01:00
sclient_connect_successful $? $TMPFILE
sclient_success = $?
debugme egrep -a "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
fi
2017-01-18 22:05:27 +01:00
[ [ $sclient_success -eq 0 ] ] && \
vuln_exportdh_ciphers = true || \
vuln_exportdh_ciphers = false
2015-09-17 15:30:15 +02:00
2016-12-21 20:55:01 +01:00
if [ [ $DEBUG -ge 2 ] ] ; then
if " $using_sockets " ; then
2017-01-18 22:05:27 +01:00
for hexc in $( sed 's/, / /g' <<< " $exportdh_cipher_list_hex " ) ; do
2016-12-21 20:55:01 +01:00
hexc = " 0x ${ hexc : 0 : 2 } ,0x ${ hexc : 3 : 2 } "
for ( ( i = 0; i < TLS_NR_CIPHERS; i++ ) ) ; do
[ [ " $hexc " = = " ${ TLS_CIPHER_HEXCODE [i] } " ] ] && break
done
2017-02-25 16:31:30 +01:00
[ [ $i -eq $TLS_NR_CIPHERS ] ] && tm_out " $hexc " || tm_out " ${ TLS_CIPHER_OSSL_NAME [i] } "
2016-12-21 20:55:01 +01:00
done
2017-02-25 16:31:30 +01:00
tmln_out
2016-12-21 20:55:01 +01:00
else
2017-01-18 22:05:27 +01:00
echo $( actually_supported_ciphers $exportdh_cipher_list )
2016-12-21 20:55:01 +01:00
fi
fi
2015-09-17 15:30:15 +02:00
2016-12-29 22:31:42 +01:00
# Try all ciphers that use an ephemeral DH key. If successful, check whether the key uses a weak prime.
if " $using_sockets " ; then
2017-01-19 14:45:19 +01:00
tls_sockets "03" " $all_dh_ciphers " "ephemeralkey"
2016-12-29 22:31:42 +01:00
sclient_success = $?
if [ [ $sclient_success -eq 0 ] ] || [ [ $sclient_success -eq 2 ] ] ; then
cp " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " $TMPFILE
key_bitstring = " $( awk '/-----BEGIN PUBLIC KEY/,/-----END PUBLIC KEY/ { print $0 }' $TMPFILE ) "
fi
else
2017-01-21 16:52:02 +01:00
# FIXME: determine # of ciphers supported, 48 only are the shipped binaries
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options " $STARTTLS $BUGS -cipher kEDH -msg -connect $NODEIP : $PORT $PROXY $SNI " ) >$TMPFILE 2>$ERRFILE </dev/null
2016-12-29 22:31:42 +01:00
sclient_connect_successful $? $TMPFILE
if [ [ $? -eq 0 ] ] && grep -q ServerKeyExchange $TMPFILE ; then
2017-01-19 14:45:19 +01:00
# Example: '<<< TLS 1.0 Handshake [length 010b], ServerKeyExchange'
# get line with ServerKeyExchange, cut from the beginning to "length ". cut from the end to ']'
str = " $( awk '/<<< TLS 1.[0-2].*ServerKeyExchange$/' $TMPFILE ) "
2017-09-18 17:50:06 +02:00
if [ [ -z " $str " ] ] ; then
str = " $( awk '/<<< SSL [2-3].*ServerKeyExchange$/' $TMPFILE ) "
fi
2017-01-19 14:45:19 +01:00
str = " ${ str #<*length } "
str = " ${ str %]* } "
server_key_exchange_len = $( hex2dec " $str " )
2016-12-29 22:31:42 +01:00
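# server_key_exchange_len is declared 'local -i' above, so the next assignment is evaluated arithmetically:
# openssl's -msg hex dump prints 16 bytes per line, so length/16 lines follow the ServerKeyExchange match,
# plus 2 lines of slack for the header line and the integer division.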
server_key_exchange_len = 2+$server_key_exchange_len /16
server_key_exchange = " $( grep -A $server_key_exchange_len ServerKeyExchange $TMPFILE | tail -n +2) "
server_key_exchange = " $( toupper " $( strip_spaces " $( newline_to_spaces " $server_key_exchange " ) " ) " ) "
server_key_exchange = " ${ server_key_exchange %%[!0-9A-F]* } "
server_key_exchange_len = ${# server_key_exchange }
[ [ $server_key_exchange_len -gt 8 ] ] && [ [ " ${ server_key_exchange : 0 : 2 } " = = "0C" ] ] && ephemeral_pub_len = $( hex2dec " ${ server_key_exchange : 2 : 6 } " )
2016-12-29 22:45:46 +01:00
[ [ $ephemeral_pub_len -ne 0 ] ] && [ [ $ephemeral_pub_len -le $server_key_exchange_len ] ] && key_bitstring = " $( get_dh_ephemeralkey " ${ server_key_exchange : 8 } " ) "
2016-12-29 22:31:42 +01:00
fi
fi
2017-01-18 22:05:27 +01:00
# now the final test for common primes
2016-12-29 22:31:42 +01:00
if [ [ -n " $key_bitstring " ] ] ; then
2017-10-27 19:07:04 +02:00
dh_p = " $( $OPENSSL pkey -pubin -text -noout 2>>$ERRFILE <<< " $key_bitstring " | awk '/prime:/,/generator:/' | egrep -v "prime|generator" ) "
2016-12-29 22:31:42 +01:00
dh_p = " $( strip_spaces " $( colon_to_spaces " $( newline_to_spaces " $dh_p " ) " ) " ) "
2016-12-30 17:32:41 +01:00
[ [ " ${ dh_p : 0 : 2 } " = = "00" ] ] && dh_p = " ${ dh_p : 2 } "
2017-01-19 14:45:19 +01:00
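# each hex char of dh_p encodes 4 bits, so the bit length of the prime is 4 * number of hex chars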
len_dh_p = " $(( 4 * ${# dh_p } )) "
2017-02-25 16:31:30 +01:00
debugme tmln_out " len(dh_p): $len_dh_p | dh_p: $dh_p "
2017-01-18 15:53:01 +01:00
echo " $dh_p " > $TEMPDIR /dh_p.txt
2017-01-18 22:05:27 +01:00
if [ [ ! -s " $common_primes_file " ] ] ; then
2017-02-25 16:31:30 +01:00
prln_local_problem " couldn't read common primes file $common_primes_file "
2017-01-18 22:05:27 +01:00
out " ${ spaces } "
2017-08-29 15:03:47 +02:00
fileout "LOGJAM_common primes_Problem" "WARN" " couldn't read common primes file $common_primes_file "
2017-01-18 22:05:27 +01:00
ret = 7
else
dh_p = " $( toupper " $dh_p " ) "
# The line preceding the match is basically the hint we want to echo;
2017-09-18 17:50:06 +02:00
# the most elegant thing to get the previous line [ awk '/regex/ { print x }; { x=$0 }' ] doesn't work with gawk
2017-01-18 22:05:27 +01:00
lineno_matched = $( grep -n " $dh_p " " $common_primes_file " 2>/dev/null | awk -F':' '{ print $1 }' )
if [ [ " $lineno_matched " -ne 0 ] ] ; then
comment = " $( awk " NR == $lineno_matched -1 " " $common_primes_file " | awk -F'"' '{ print $2 }' ) "
2017-01-19 14:45:19 +01:00
ret = 1 # vulnerable: common prime
2017-01-18 22:05:27 +01:00
else
2017-01-19 14:45:19 +01:00
ret = 0 # not vulnerable: no known common prime
2017-01-18 22:05:27 +01:00
fi
fi
2017-01-18 15:53:01 +01:00
else
2017-01-19 14:45:19 +01:00
ret = 3 # no DH key detected
2017-01-18 22:05:27 +01:00
fi
# now the final verdict
# we use the color only once here on the screen, so screen and fileout SEEM to be inconsistent
if " $vuln_exportdh_ciphers " ; then
2017-01-21 18:08:31 +01:00
pr_svrty_high "VULNERABLE (NOT ok):" ; out " uses DH EXPORT ciphers"
fileout "logjam" "HIGH" "LOGJAM: VULNERABLE, uses DH EXPORT ciphers" " $cve " " $cwe " " $hint "
2017-01-18 22:05:27 +01:00
if [ [ $ret -eq 3 ] ] ; then
out ", no DH key detected"
fileout "LOGJAM_common primes" "OK" "no DH key detected"
elif [ [ $ret -eq 1 ] ] ; then
out " \n ${ spaces } "
2017-01-19 14:45:19 +01:00
# now size matters -- i.e. the bit size ;-)
if [ [ $len_dh_p -le 512 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_critical "VULNERABLE (NOT ok):" ; out " common prime " ; pr_italic " $comment " ; out " detected ( $len_dh_p bits) "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "CRITICAL" " common prime \" $comment \" detected "
elif [ [ $len_dh_p -le 1024 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_high "VULNERABLE (NOT ok):" ; out " common prime " ; pr_italic " $comment " ; out " detected ( $len_dh_p bits) "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "HIGH" " common prime \" $comment \" detected "
elif [ [ $len_dh_p -le 1536 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_medium " common prime with $len_dh_p bits detected: " ; pr_italic " $comment "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "MEDIUM" " common prime \" $comment \" detected "
elif [ [ $len_dh_p -le 2048 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_low " common prime with $len_dh_p bits detected: " ; pr_italic " $comment "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "LOW" " common prime \" $comment \" detected "
else
2017-02-03 13:03:22 +01:00
out " common prime with $len_dh_p bits detected: " ; pr_italic " $comment "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "INFO" " common prime \" $comment \" detected "
fi
2017-01-18 22:05:27 +01:00
elif [ [ $ret -eq 0 ] ] ; then
out " no common primes detected"
fileout "LOGJAM_common primes" "INFO" "no common primes detected"
elif [ [ $ret -eq 7 ] ] ; then
out "FIXME 1"
fi
2017-01-18 15:53:01 +01:00
else
2017-01-18 22:05:27 +01:00
if [ [ $ret -eq 1 ] ] ; then
2017-01-19 14:45:19 +01:00
# now size matters -- i.e. the bit size ;-)
if [ [ $len_dh_p -le 512 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_critical "VULNERABLE (NOT ok):" ; out " uses common prime " ; pr_italic " $comment " ; out " ( $len_dh_p bits) "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "CRITICAL" " common prime \" $comment \" detected "
elif [ [ $len_dh_p -le 1024 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_high "VULNERABLE (NOT ok):" ; out " common prime " ; pr_italic " $comment " ; out " detected ( $len_dh_p bits) "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "HIGH" " common prime \" $comment \" detected "
elif [ [ $len_dh_p -le 1536 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_medium " Common prime with $len_dh_p bits detected: " ; pr_italic " $comment "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "MEDIUM" " common prime \" $comment \" detected "
elif [ [ $len_dh_p -le 2048 ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_low " Common prime with $len_dh_p bits detected: " ; pr_italic " $comment "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "LOW" " common prime \" $comment \" detected "
else
2017-02-03 13:03:22 +01:00
out " Common prime with $len_dh_p bits detected: " ; pr_italic " $comment "
2017-01-19 14:45:19 +01:00
fileout "LOGJAM_common primes" "INFO" " common prime \" $comment \" detected "
fi
2017-02-03 13:03:22 +01:00
outln ","
out " ${ spaces } but no DH EXPORT ciphers ${ addtl_warning } "
2017-01-18 22:05:27 +01:00
fileout "logjam" "OK" " LOGJAM: not vulnerable, no DH EXPORT ciphers, $addtl_warning " " $cve " " $cwe "
elif [ [ $ret -eq 3 ] ] ; then
pr_done_good "not vulnerable (OK):" ; out " no DH EXPORT ciphers ${ addtl_warning } "
fileout "logjam" "OK" " LOGJAM: not vulnerable, no DH EXPORT ciphers, $addtl_warning " " $cve " " $cwe "
out ", no DH key detected"
fileout "LOGJAM_common primes" "OK" "no DH key detected"
elif [ [ $ret -eq 0 ] ] ; then
pr_done_good "not vulnerable (OK):" ; out " no DH EXPORT ciphers ${ ddtl_warning } "
fileout "logjam" "OK" " LOGJAM: not vulnerable, no DH EXPORT ciphers, $addtl_warning " " $cve " " $cwe "
out ", no common primes detected"
2017-01-18 15:53:01 +01:00
fileout "LOGJAM_common primes" "OK" "no common primes detected"
2017-01-18 22:05:27 +01:00
elif [ [ $ret -eq 7 ] ] ; then
pr_done_good "partly not vulnerable:" ; out " no DH EXPORT ciphers ${ ddtl_warning } "
fileout "logjam" "OK" " LOGJAM: not vulnerable, no DH EXPORT ciphers, $addtl_warning " " $cve " " $cwe "
2017-01-18 15:53:01 +01:00
fi
fi
2017-01-18 22:05:27 +01:00
outln
tmpfile_handle $FUNCNAME .txt
2017-01-18 15:53:01 +01:00
return 0
}
2015-05-27 14:28:18 +02:00
2016-03-03 11:56:25 +01:00
run_drown( ) {
run_drown() when server has non-RSA certificates
This PR addresses a problem in `run_drown()` when the server does not support SSLv2, but has multiple certificates or doesn't have an RSA certificate.
One example of the problem can be seen with www.facebook.com. If `run_server_preferences()` is run before `run_drown()`, then the results of `run_drown()` are:
```
DROWN (2016-0800, CVE-2016-0703) not vulnerable on this port (OK)
make sure you don't use this certificate elsewhere with SSLv2 enabled services
https://censys.io/ipv4?q=A626B154CC65634181250B810B1BD4C89EC277CEA08D785EEBE7E768BDA7BB00 SHA256 A3F474FB17509AE6C5B6BA5E46B79E0DE6AF1BF1EEAA040A6114676E714C9965 could help you to find out
```
If only `run_drown()` is performed, then the result is:
```
DROWN (2016-0800, CVE-2016-0703) not vulnerable on this port (OK)
make sure you don't use this certificate elsewhere with SSLv2 enabled services
https://censys.io/ipv4?q=A626B154CC65634181250B810B1BD4C89EC277CEA08D785EEBE7E768BDA7BB00 could help you to find out
```
However, A626B154CC65634181250B810B1BD4C89EC277CEA08D785EEBE7E768BDA7BB00 is the fingerprint of Facebook's ECDSA certificate, not its RSA certificate.
In addition, as noted in the "FIXME," `run_drown()` will display the warning "make sure you don't use this certificate elsewhere with SSLv2 enabled services" even if the server doesn't have an RSA certificate, even though SSLv2 can only use RSA certificates.
This PR fixes this issue by only showing the warning if the server has an RSA certificate and by ensuring that the `$cert_fingerprint_sha2` used to construct the "https://censys.io/ipv4?q=..." URL only contains a single SHA256 fingerprint and that it is the fingerprint of the server's RSA certificate.
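A rough sketch of the underlying idea (placeholder host; not the script's exact code): restricting the handshake to RSA-authenticated suites makes the server present its RSA certificate, whose SHA256 fingerprint is then the one to use in the censys.io query:
```
# hypothetical manual check -- aRSA limits the handshake to RSA-authenticated cipher suites
openssl s_client -connect www.example.com:443 -cipher aRSA </dev/null 2>/dev/null \
   | openssl x509 -noout -fingerprint -sha256
```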
2016-12-02 16:16:04 +01:00
local nr_ciphers_detected ret
2016-03-03 19:50:44 +01:00
local spaces = " "
local cert_fingerprint_sha2 = ""
2016-11-17 23:27:27 +01:00
local cve = "CVE-2016-0800, CVE-2016-0703"
local cwe = "CWE-310"
2016-11-23 09:46:11 +01:00
local hint = ""
2016-03-03 11:56:25 +01:00
if [ [ $VULN_COUNT -le $VULN_THRESHLD ] ] ; then
outln
pr_headlineln " Testing for DROWN vulnerability "
2016-03-03 19:50:44 +01:00
outln
2016-03-03 11:56:25 +01:00
fi
2016-03-03 19:50:44 +01:00
# if we want to use OPENSSL: check for < openssl 1.0.2g, openssl 1.0.1s if native openssl
2017-01-18 16:23:18 +01:00
pr_bold " DROWN" ; out " ( $cve ) "
2016-08-11 20:16:33 +02:00
2017-07-20 17:44:00 +02:00
# Any fingerprint that is placed in $RSA_CERT_FINGERPRINT_SHA2 is also added
# to $CERT_FINGERPRINT_SHA2, so if $CERT_FINGERPRINT_SHA2 is not empty, but
# $RSA_CERT_FINGERPRINT_SHA2 is empty, then the server doesn't have an RSA certificate.
if [ [ -z " $CERT_FINGERPRINT_SHA2 " ] ] ; then
get_host_cert "-cipher aRSA"
[ [ $? -eq 0 ] ] && cert_fingerprint_sha2 = " $( $OPENSSL x509 -noout -in $HOSTCERT -fingerprint -sha256 2>>$ERRFILE | sed -e 's/^.*Fingerprint=//' -e 's/://g' ) "
else
cert_fingerprint_sha2 = " $RSA_CERT_FINGERPRINT_SHA2 "
2017-07-20 19:13:06 +02:00
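# strip the leading "SHA256 " label so that only the bare hex fingerprint ends up in the censys.io URL below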
cert_fingerprint_sha2 = ${ cert_fingerprint_sha2 /SHA256 / }
2017-07-20 17:44:00 +02:00
fi
sslv2_sockets
2016-03-03 19:50:44 +01:00
case $? in
7) # strange reply, couldn't convert the cipher spec length to a hex number
2017-02-25 16:31:30 +01:00
pr_fixme "strange v2 reply "
2016-03-03 19:50:44 +01:00
outln " (rerun with DEBUG >=2)"
2016-08-11 20:16:33 +02:00
[ [ $DEBUG -ge 3 ] ] && hexdump -C " $TEMPDIR / $NODEIP .sslv2_sockets.dd " | head -1
2016-03-03 19:50:44 +01:00
ret = 7
2016-11-17 23:27:27 +01:00
fileout "drown" "WARN" "SSLv2: received a strange SSLv2 reply (rerun with DEBUG>=2)" " $cve " " $cwe "
2016-03-03 19:50:44 +01:00
; ;
2017-07-20 17:44:00 +02:00
3) # vulnerable, [[ -n "$cert_fingerprint_sha2" ]] test is not needed as we should have RSA certificate here
2016-08-11 20:16:33 +02:00
lines = $( count_lines " $( hexdump -C " $TEMPDIR / $NODEIP .sslv2_sockets.dd " 2>/dev/null) " )
2017-02-25 16:31:30 +01:00
debugme tm_out " ( $lines lines) "
2016-03-03 19:50:44 +01:00
if [ [ " $lines " -gt 1 ] ] ; then
nr_ciphers_detected = $(( V2_HELLO_CIPHERSPEC_LENGTH / 3 ))
if [ [ 0 -eq " $nr_ciphers_detected " ] ] ; then
2017-09-20 17:22:54 +02:00
prln_svrty_high "CVE-2015-3197: SSLv2 supported but couldn't detect a cipher (NOT ok)" ;
fileout "drown" "HIGH" " SSLv2 offered, but could not detect a cipher (CVE-2015-3197. Make sure you don't use this certificate elsewhere, see https://censys.io/ipv4?q= $cert_fingerprint_sha2 " " $cve " " $cwe " " $hint "
2016-03-03 19:50:44 +01:00
else
2017-02-25 16:31:30 +01:00
prln_svrty_critical " VULNERABLE (NOT ok), SSLv2 offered with $nr_ciphers_detected ciphers " ;
2017-07-20 17:44:00 +02:00
fileout "drown" "CRITICAL" " VULNERABLE, SSLv2 offered with $nr_ciphers_detected ciphers. Make sure you don't use this certificate elsewhere, see https://censys.io/ipv4?q= $cert_fingerprint_sha2 " " $cve " " $cwe " " $hint "
2016-03-03 19:50:44 +01:00
fi
2017-07-20 17:44:00 +02:00
outln " $spaces Make sure you don't use this certificate elsewhere, see: "
out " $spaces "
pr_url " https://censys.io/ipv4?q= $cert_fingerprint_sha2 "
outln
2016-03-03 19:50:44 +01:00
fi
ret = 1
; ;
2017-07-20 17:44:00 +02:00
*) prln_done_best "not vulnerable on this host and port (OK)"
fileout "drown" "OK" "not vulnerable to DROWN on this host and port" " $cve " " $cwe "
2016-12-02 16:16:04 +01:00
if [ [ -n " $cert_fingerprint_sha2 " ] ] ; then
outln " $spaces make sure you don't use this certificate elsewhere with SSLv2 enabled services "
2017-07-20 17:44:00 +02:00
out " $spaces "
pr_url " https://censys.io/ipv4?q= $cert_fingerprint_sha2 "
outln " could help you to find out"
fileout "drown" "INFO" " make sure you don't use this certificate elsewhere with SSLv2 enabled services, see https://censys.io/ipv4?q= $cert_fingerprint_sha2 "
2017-01-18 16:23:18 +01:00
else
outln " $spaces no RSA certificate, thus certificate can't be used with SSLv2 elsewhere "
fileout "drown" "INFO" "no RSA certificate, thus certificate can't be used with SSLv2 elsewhere"
2016-03-03 19:50:44 +01:00
fi
2016-12-02 16:16:04 +01:00
ret = 0
2016-03-03 19:50:44 +01:00
; ;
esac
2016-03-03 11:56:25 +01:00
2016-12-02 16:16:04 +01:00
return $ret
2016-03-03 11:56:25 +01:00
}
2015-05-27 14:28:18 +02:00
2015-05-17 22:43:53 +02:00
# Browser Exploit Against SSL/TLS: don't use CBC ciphers in SSLv3 or TLSv1.0
2015-07-22 13:11:20 +02:00
run_beast( ) {
2017-09-19 18:37:03 +02:00
local hexc dash cbc_cipher sslvers auth mac export
2016-12-06 17:37:41 +01:00
local -a ciph hexcode normalized_hexcode kx enc export2
local proto proto_hex
2017-10-06 15:56:11 +02:00
local -i i ret nr_ciphers = 0 sclient_success = 0
2016-12-06 17:37:41 +01:00
local detected_cbc_ciphers = "" ciphers_to_test
2015-09-17 15:30:15 +02:00
local higher_proto_supported = ""
local vuln_beast = false
local spaces = " "
local cr = $'\n'
local first = true
local continued = false
2016-12-06 17:37:41 +01:00
local cbc_cipher_list = "ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:SRP-DSS-AES-256-CBC-SHA:SRP-RSA-AES-256-CBC-SHA:SRP-AES-256-CBC-SHA:DHE-PSK-AES256-CBC-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DH-RSA-AES256-SHA:DH-DSS-AES256-SHA:DHE-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:AECDH-AES256-SHA:ADH-AES256-SHA:ADH-CAMELLIA256-SHA:ECDH-RSA-AES256-SHA:ECDH-ECDSA-AES256-SHA:AES256-SHA:ECDHE-PSK-AES256-CBC-SHA:CAMELLIA256-SHA:RSA-PSK-AES256-CBC-SHA:PSK-AES256-CBC-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:SRP-DSS-AES-128-CBC-SHA:SRP-RSA-AES-128-CBC-SHA:SRP-AES-128-CBC-SHA:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DH-RSA-AES128-SHA:DH-DSS-AES128-SHA:DHE-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DH-RSA-SEED-SHA:DH-DSS-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:AECDH-AES128-SHA:ADH-AES128-SHA:ADH-SEED-SHA:ADH-CAMELLIA128-SHA:ECDH-RSA-AES128-SHA:ECDH-ECDSA-AES128-SHA:AES128-SHA:ECDHE-PSK-AES128-CBC-SHA:DHE-PSK-AES128-CBC-SHA:SEED-SHA:CAMELLIA128-SHA:IDEA-CBC-SHA:RSA-PSK-AES128-CBC-SHA:PSK-AES128-CBC-SHA:KRB5-IDEA-CBC-SHA:KRB5-IDEA-CBC-MD5:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-3DES-EDE-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:AECDH-DES-CBC3-SHA:ADH-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:RSA-PSK-3DES-EDE-CBC-SHA:PSK-3DES-EDE-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-DES-CBC3-MD5:ECDHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:ADH-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:KRB5-DES-CBC-SHA:KRB5-DES-CBC-MD5:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-ADH-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-KRB5-RC2-CBC-SHA:EXP-KRB5-DES-CBC-SHA:EXP-KRB5-RC2-CBC-MD5:EXP-KRB5-DES-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA"
2017-01-21 18:08:31 +01:00
local cbc_ciphers_hex = "c0,14, c0,0a, c0,22, c0,21, c0,20, 00,91, 00,39, 00,38, 00,37, 00,36, 00,88, 00,87, 00,86, 00,85, c0,19, 00,3a, 00,89, c0,0f, c0,05, 00,35, c0,36, 00,84, 00,95, 00,8d, c0,13, c0,09, c0,1f, c0,1e, c0,1d, 00,33, 00,32, 00,31, 00,30, 00,9a, 00,99, 00,98, 00,97, 00,45, 00,44, 00,43, 00,42, c0,18, 00,34, 00,9b, 00,46, c0,0e, c0,04, 00,2f, c0,35, 00,90, 00,96, 00,41, 00,07, 00,94, 00,8c, 00,21, 00,25, c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,17, 00,1b, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,1a, 00,62, 00,09, 00,1e, 00,22, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e"
2016-12-06 17:37:41 +01:00
local has_dh_bits = " $HAS_DH_BITS "
local using_sockets = true
2016-11-17 23:27:27 +01:00
local cve = "CVE-2011-3389"
local cwe = "CWE-20"
2016-11-23 09:46:11 +01:00
local hint = ""
2015-09-17 15:30:15 +02:00
2016-02-20 14:10:04 +01:00
if [ [ $VULN_COUNT -le $VULN_THRESHLD ] ] ; then
2015-11-08 22:14:28 +01:00
outln
pr_headlineln " Testing for BEAST vulnerability "
2016-02-20 14:10:04 +01:00
fi
2017-07-26 22:37:50 +02:00
if [ [ $VULN_COUNT -le $VULN_THRESHLD ] ] ; then
2015-11-08 22:14:28 +01:00
outln
2015-09-17 15:30:15 +02:00
fi
2016-11-17 23:27:27 +01:00
pr_bold " BEAST" ; out " ( $cve ) "
2015-09-17 15:30:15 +02:00
2017-01-21 18:08:31 +01:00
" $SSL_NATIVE " && using_sockets = false
[ [ $TLS_NR_CIPHERS = = 0 ] ] && using_sockets = false
2016-12-06 17:37:41 +01:00
if " $using_sockets " || [ [ $OSSL_VER_MAJOR -lt 1 ] ] ; then
for ( ( i = 0; i < TLS_NR_CIPHERS; i++ ) ) ; do
hexc = " ${ TLS_CIPHER_HEXCODE [i] } "
if [ [ ${# hexc } -eq 9 ] ] && [ [ " ${ TLS_CIPHER_RFC_NAME [i] } " = ~ CBC ] ] && \
2017-02-24 16:22:59 +01:00
[ [ ! " ${ TLS_CIPHER_RFC_NAME [i] } " = ~ SHA256 ] ] && [ [ ! " ${ TLS_CIPHER_RFC_NAME [i] } " = ~ SHA384 ] ] ; then
2016-12-06 17:37:41 +01:00
ciph[ nr_ciphers] = " ${ TLS_CIPHER_OSSL_NAME [i] } "
hexcode[ nr_ciphers] = " ${ hexc : 2 : 2 } , ${ hexc : 7 : 2 } "
rfc_ciph[ nr_ciphers] = " ${ TLS_CIPHER_RFC_NAME [i] } "
kx[ nr_ciphers] = " ${ TLS_CIPHER_KX [i] } "
enc[ nr_ciphers] = " ${ TLS_CIPHER_ENC [i] } "
export2[ nr_ciphers] = " ${ TLS_CIPHER_EXPORT [i] } "
ossl_supported[ nr_ciphers] = ${ TLS_CIPHER_OSSL_SUPPORTED [i] }
if " $using_sockets " && " $WIDE " && ! " $has_dh_bits " && \
( [ [ ${ kx [nr_ciphers] } = = "Kx=ECDH" ] ] || [ [ ${ kx [nr_ciphers] } = = "Kx=DH" ] ] || [ [ ${ kx [nr_ciphers] } = = "Kx=EDH" ] ] ) ; then
ossl_supported[ nr_ciphers] = false
fi
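# normalize the hex code for display: ciphers in the 0x00,0xNN range are shown as xNN, all others as xNNNN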
if [ [ " ${ hexc : 2 : 2 } " = = "00" ] ] ; then
normalized_hexcode[ nr_ciphers] = " x ${ hexc : 7 : 2 } "
else
normalized_hexcode[ nr_ciphers] = " x ${ hexc : 2 : 2 } ${ hexc : 7 : 2 } "
fi
nr_ciphers += 1
fi
done
else
while read hexc dash ciph[ nr_ciphers] sslvers kx[ nr_ciphers] auth enc[ nr_ciphers] mac export2[ nr_ciphers] ; do
if [ [ " : ${ cbc_cipher_list } : " = ~ " : ${ ciph [nr_ciphers] } : " ] ] ; then
ossl_supported[ nr_ciphers] = true
if [ [ " ${ hexc : 2 : 2 } " = = "00" ] ] ; then
normalized_hexcode[ nr_ciphers] = " x ${ hexc : 7 : 2 } "
else
normalized_hexcode[ nr_ciphers] = " x ${ hexc : 2 : 2 } ${ hexc : 7 : 2 } "
fi
nr_ciphers += 1
fi
done < <( $OPENSSL ciphers -tls1 -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>>$ERRFILE )
fi
2015-10-01 13:27:14 +02:00
2016-02-03 00:05:57 +01:00
# first determine whether it's mitigated by higher protocols
2015-10-01 13:27:14 +02:00
for proto in tls1_1 tls1_2; do
2017-10-06 15:56:11 +02:00
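# has_server_protocol() as used here: 0 = known to be supported, 1 = known to be not supported,
# 2 = not yet determined --> probe it with openssl below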
ret = $( has_server_protocol " $proto " )
if [ [ $ret -eq 0 ] ] ; then
2017-10-02 14:55:57 +02:00
case $proto in
tls1_1) higher_proto_supported += " TLSv1.1" ; ;
tls1_2) higher_proto_supported += " TLSv1.2" ; ;
esac
2017-10-06 15:56:11 +02:00
elif [ [ $ret -eq 2 ] ] ; then
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options "-state -" $proto " $STARTTLS $BUGS -connect $NODEIP : $PORT $PROXY $SNI " ) 2>>$ERRFILE >$TMPFILE </dev/null
2017-10-02 14:55:57 +02:00
if sclient_connect_successful $? $TMPFILE ; then
higher_proto_supported += " $( get_protocol $TMPFILE ) "
add_tls_offered " $proto " yes
fi
2015-10-01 13:27:14 +02:00
fi
done
2015-09-17 15:30:15 +02:00
for proto in ssl3 tls1; do
2016-12-06 17:37:41 +01:00
if [ [ " $proto " = = "ssl3" ] ] && ! " $using_sockets " && ! locally_supported " - $proto " ; then
2016-08-23 18:37:22 +02:00
continued = true
out " "
continue
fi
2017-10-06 15:56:11 +02:00
ret = $( has_server_protocol " $proto " )
if [ [ $ret -eq 0 ] ] ; then
2017-10-02 14:55:57 +02:00
sclient_success = 0
2017-10-06 15:56:11 +02:00
elif [ [ $ret -eq 1 ] ] ; then
sclient_success = 1
2017-10-02 14:55:57 +02:00
elif [ [ " $proto " != "ssl3" ] ] || " $HAS_SSL3 " ; then
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options "-" $proto " $STARTTLS $BUGS -connect $NODEIP : $PORT $PROXY $SNI " ) >$TMPFILE 2>>$ERRFILE </dev/null
2016-12-06 17:37:41 +01:00
sclient_connect_successful $? $TMPFILE
2017-10-02 14:55:57 +02:00
sclient_success = $?
2016-12-06 17:37:41 +01:00
else
tls_sockets "00" " $TLS_CIPHER "
2017-10-02 14:55:57 +02:00
sclient_success = $?
2016-12-06 17:37:41 +01:00
fi
2017-10-02 14:55:57 +02:00
if [ [ $sclient_success -ne 0 ] ] ; then # protocol supported?
2016-02-07 19:13:59 +01:00
if " $continued " ; then # second round: we hit TLS1
2016-12-06 17:37:41 +01:00
if " $HAS_SSL3 " || " $using_sockets " ; then
2017-02-25 16:31:30 +01:00
prln_done_good "no SSL3 or TLS1 (OK)"
2016-11-17 23:27:27 +01:00
fileout "beast" "OK" "BEAST: not vulnerable, no SSL3 or TLS1" " $cve " " $cwe "
2016-08-23 18:37:22 +02:00
else
2017-02-25 16:31:30 +01:00
prln_done_good "no TLS1 (OK)"
2016-11-18 00:44:24 +01:00
fileout "beast" "OK" "BEAST: not vulnerable, no TLS1" " $cve " " $cwe "
2016-08-23 18:37:22 +02:00
fi
2015-09-17 15:30:15 +02:00
return 0
2016-01-23 19:18:33 +01:00
else # protocol not succeeded but it's the first time
2015-09-17 15:30:15 +02:00
continued = true
2015-10-01 13:27:14 +02:00
continue # protocol not supported, so we do not need to check each cipher with that protocol
2015-09-17 15:30:15 +02:00
fi
fi # protocol succeeded
2017-10-02 14:55:57 +02:00
add_tls_offered " $proto " yes
2015-10-01 13:27:14 +02:00
# now we test in one shot with the precompiled ciphers
2016-12-06 17:37:41 +01:00
if " $using_sockets " ; then
case " $proto " in
"ssl3" ) proto_hex = "00" ; ;
"tls1" ) proto_hex = "01" ; ;
esac
2017-06-01 15:36:03 +02:00
tls_sockets " $proto_hex " " $cbc_ciphers_hex "
2016-12-06 17:37:41 +01:00
[ [ $? -eq 0 ] ] || continue
else
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options "-" $proto " -cipher " $cbc_cipher_list " $STARTTLS $BUGS -connect $NODEIP : $PORT $PROXY $SNI " ) >$TMPFILE 2>>$ERRFILE </dev/null
2016-12-06 17:37:41 +01:00
sclient_connect_successful $? $TMPFILE || continue
fi
2015-09-17 15:30:15 +02:00
2016-12-06 17:37:41 +01:00
detected_cbc_ciphers = ""
2017-01-21 18:08:31 +01:00
for ( ( i = 0; i<nr_ciphers; i++) ) ; do
2016-12-06 17:37:41 +01:00
ciphers_found[ i] = false
sigalg[ i] = ""
done
while true; do
ciphers_to_test = ""
for ( ( i = 0; i < nr_ciphers; i++ ) ) ; do
! " ${ ciphers_found [i] } " && " ${ ossl_supported [i] } " && ciphers_to_test += " : ${ ciph [i] } "
done
[ [ -z " $ciphers_to_test " ] ] && break
2017-09-19 18:37:03 +02:00
$OPENSSL s_client $( s_client_options "-cipher " ${ ciphers_to_test : 1 } " -" $proto " $STARTTLS $BUGS -connect $NODEIP : $PORT $PROXY $SNI " ) >$TMPFILE 2>>$ERRFILE </dev/null
2016-12-06 17:37:41 +01:00
sclient_connect_successful $? $TMPFILE || break
2017-04-13 16:32:19 +02:00
cbc_cipher = $( get_cipher $TMPFILE )
2016-12-06 17:37:41 +01:00
[ [ -z " $cbc_cipher " ] ] && break
for ( ( i = 0; i < nr_ciphers; i++ ) ) ; do
[ [ " $cbc_cipher " = = " ${ ciph [i] } " ] ] && break
done
ciphers_found[ i] = true
2017-02-27 16:34:04 +01:00
if [ [ " $DISPLAY_CIPHERNAMES " = ~ openssl ] ] || [ [ " ${ rfc_ciph [i] } " = = "-" ] ] ; then
2017-02-13 22:07:25 +01:00
detected_cbc_ciphers += " ${ ciph [i] } "
else
detected_cbc_ciphers += " ${ rfc_ciph [i] } "
fi
2016-12-06 17:37:41 +01:00
vuln_beast = true
if " $WIDE " && ( [ [ ${ kx [i] } = = "Kx=ECDH" ] ] || [ [ ${ kx [i] } = = "Kx=DH" ] ] || [ [ ${ kx [i] } = = "Kx=EDH" ] ] ) ; then
dhlen = $( read_dhbits_from_file " $TMPFILE " quiet)
kx[ i] = " ${ kx [i] } $dhlen "
2016-02-20 14:10:04 +01:00
fi
2016-12-06 17:37:41 +01:00
" $WIDE " && " $SHOW_SIGALGO " && grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TMPFILE && \
2017-10-27 16:34:04 +02:00
sigalg[ i] = " $( read_sigalg_from_file " $TMPFILE " ) "
2016-12-06 17:37:41 +01:00
done
if " $using_sockets " ; then
while true; do
ciphers_to_test = ""
for ( ( i = 0; i < nr_ciphers; i++ ) ) ; do
! " ${ ciphers_found [i] } " && ciphers_to_test += " , ${ hexcode [i] } "
done
[ [ -z " $ciphers_to_test " ] ] && break
if " $SHOW_SIGALGO " ; then
tls_sockets " $proto_hex " " ${ ciphers_to_test : 2 } , 00,ff " "all"
else
tls_sockets " $proto_hex " " ${ ciphers_to_test : 2 } , 00,ff " "ephemeralkey"
fi
[ [ $? -ne 0 ] ] && break
2017-04-13 16:32:19 +02:00
cbc_cipher = $( get_cipher " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " )
2016-12-06 17:37:41 +01:00
for ( ( i = 0; i < nr_ciphers; i++ ) ) ; do
[ [ " $cbc_cipher " = = " ${ rfc_ciph [i] } " ] ] && break
done
ciphers_found[ i] = true
2017-02-27 16:34:04 +01:00
if ( [ [ " $DISPLAY_CIPHERNAMES " = ~ openssl ] ] && [ [ " ${ ciph [i] } " != "-" ] ] ) || [ [ " ${ rfc_ciph [i] } " = = "-" ] ] ; then
2017-10-02 13:48:55 +02:00
detected_cbc_ciphers += " ${ ciph [i] } "
2016-12-06 17:37:41 +01:00
else
2017-10-02 13:48:55 +02:00
detected_cbc_ciphers += " ${ rfc_ciph [i] } "
2016-12-06 17:37:41 +01:00
fi
2016-10-28 15:30:07 +02:00
vuln_beast = true
2016-12-06 17:37:41 +01:00
if " $WIDE " && ( [ [ ${ kx [i] } = = "Kx=ECDH" ] ] || [ [ ${ kx [i] } = = "Kx=DH" ] ] || [ [ ${ kx [i] } = = "Kx=EDH" ] ] ) ; then
dhlen = $( read_dhbits_from_file " $TEMPDIR / $NODEIP .parse_tls_serverhello.txt " quiet)
kx[ i] = " ${ kx [i] } $dhlen "
fi
" $WIDE " && " $SHOW_SIGALGO " && [ [ -r " $HOSTCERT " ] ] && \
2017-10-27 16:34:04 +02:00
sigalg[ i] = " $( read_sigalg_from_file " $HOSTCERT " ) "
2016-12-06 17:37:41 +01:00
done
fi
if " $WIDE " && [ [ -n " $detected_cbc_ciphers " ] ] ; then
out "\n " ; pr_underline " $( toupper $proto ) :\n " ;
if " $first " ; then
neat_header
2016-03-05 21:07:49 +01:00
fi
2016-12-06 17:37:41 +01:00
first = false
for ( ( i = 0; i < nr_ciphers; i++ ) ) ; do
if " ${ ciphers_found [i] } " || " $SHOW_EACH_C " ; then
2016-12-20 18:59:26 +01:00
export = " ${ export2 [i] } "
2017-01-25 16:41:36 +01:00
neat_list " $( tolower " ${ normalized_hexcode [i] } " ) " " ${ ciph [i] } " " ${ kx [i] } " " ${ enc [i] } " " ${ ciphers_found [i] } "
2016-12-06 17:37:41 +01:00
if " $SHOW_EACH_C " ; then
if " ${ ciphers_found [i] } " ; then
if [ [ -n " $higher_proto_supported " ] ] ; then
2017-02-03 13:03:22 +01:00
pr_svrty_low "available"
2016-12-06 17:37:41 +01:00
else
pr_svrty_medium "available"
fi
2016-03-05 21:07:49 +01:00
else
2017-02-06 11:06:59 +01:00
pr_deemphasize "not a/v"
2016-03-05 21:07:49 +01:00
fi
fi
2016-12-06 17:37:41 +01:00
outln " ${ sigalg [i] } "
2017-02-14 16:18:27 +01:00
fi
2016-12-06 17:37:41 +01:00
done
fi
2015-09-17 15:30:15 +02:00
2016-02-07 19:13:59 +01:00
if ! " $WIDE " ; then
if [ [ -n " $detected_cbc_ciphers " ] ] ; then
2016-11-17 23:27:27 +01:00
fileout " cbc_ $proto " "MEDIUM" " BEAST: CBC ciphers for $( toupper $proto ) : $detected_cbc_ciphers " " $cve " " $cwe " " $hint "
2016-01-23 19:18:33 +01:00
! " $first " && out " $spaces "
2017-02-14 19:45:14 +01:00
out " $( toupper $proto ) : "
2015-10-01 13:27:14 +02:00
[ [ -n " $higher_proto_supported " ] ] && \
2017-03-28 19:54:54 +02:00
pr_svrty_low " $( out_row_aligned_max_width " $detected_cbc_ciphers " " " $TERM_WIDTH ) " || \
pr_svrty_medium " $( out_row_aligned_max_width " $detected_cbc_ciphers " " " $TERM_WIDTH ) "
2017-02-09 17:36:24 +01:00
outln
2016-02-07 19:13:59 +01:00
detected_cbc_ciphers = "" # empty for next round
2015-09-17 15:30:15 +02:00
first = false
2016-02-07 19:13:59 +01:00
else
2016-01-23 19:18:33 +01:00
[ [ $proto = = "tls1" ] ] && ! $first && echo -n " $spaces "
2017-02-25 16:31:30 +01:00
prln_done_good " no CBC ciphers for $( toupper $proto ) (OK) "
2016-01-23 19:18:33 +01:00
first = false
fi
2016-02-07 19:13:59 +01:00
else
if ! " $vuln_beast " ; then
2017-03-31 12:24:25 +02:00
prln_done_good " no CBC ciphers for $( toupper $proto ) (OK) "
2016-11-17 23:27:27 +01:00
fileout " cbc_ $proto " "OK" " BEAST: No CBC ciphers for $( toupper $proto ) " " $cve " " $cwe "
2016-02-07 19:13:59 +01:00
fi
2015-09-17 15:30:15 +02:00
fi
done # for proto in ssl3 tls1
2016-01-23 19:18:33 +01:00
if " $vuln_beast " ; then
2015-10-01 13:27:14 +02:00
if [ [ -n " $higher_proto_supported " ] ] ; then
2016-01-23 19:18:33 +01:00
if " $WIDE " ; then
2015-09-17 15:30:15 +02:00
outln
2016-02-07 19:13:59 +01:00
# NOT ok seems too harsh for me if we have TLS >1.0
2017-02-03 13:03:22 +01:00
pr_svrty_low "VULNERABLE"
2015-10-01 13:27:14 +02:00
outln " -- but also supports higher protocols (possible mitigation): $higher_proto_supported "
else
2016-02-07 19:13:59 +01:00
out " $spaces "
2017-02-03 13:03:22 +01:00
pr_svrty_low "VULNERABLE"
2015-09-17 15:30:15 +02:00
outln " -- but also supports higher protocols (possible mitigation): $higher_proto_supported "
2015-10-01 13:27:14 +02:00
fi
2016-11-17 23:27:27 +01:00
fileout "beast" "LOW" " BEAST: VULNERABLE -- but also supports higher protocols (possible mitigation): $higher_proto_supported " " $cve " " $cwe " " $hint "
2015-10-01 13:27:14 +02:00
else
2016-01-23 19:18:33 +01:00
if " $WIDE " ; then
2015-10-01 13:27:14 +02:00
outln
2015-09-17 15:30:15 +02:00
else
2016-02-07 19:13:59 +01:00
out " $spaces "
2015-09-17 15:30:15 +02:00
fi
2016-05-27 17:43:45 +02:00
pr_svrty_medium "VULNERABLE"
2015-10-04 12:32:29 +02:00
outln " -- and no higher protocols as mitigation supported"
2016-11-17 23:27:27 +01:00
fileout "beast" "MEDIUM" "BEAST: VULNERABLE -- and no higher protocols as mitigation supported" " $cve " " $cwe " " $hint "
2015-09-17 15:30:15 +02:00
fi
fi
2017-02-25 16:31:30 +01:00
" $first " && ! " $vuln_beast " && prln_done_good "no CBC ciphers found for any protocol (OK)"
2015-09-17 15:30:15 +02:00
2016-12-06 17:37:41 +01:00
" $using_sockets " && HAS_DH_BITS = " $has_dh_bits "
2015-09-17 15:30:15 +02:00
tmpfile_handle $FUNCNAME .txt
2015-10-01 13:27:14 +02:00
return 0
2015-05-17 22:43:53 +02:00
}
2017-02-03 22:36:04 +01:00
# http://www.isg.rhul.ac.uk/tls/Lucky13.html
# in a nutshell: don't offer CBC suites (again). MAC as a fix for padding oracles is not enough. Best: TLS v1.2+ AES GCM
2015-07-22 13:11:20 +02:00
run_lucky13( ) {
2017-02-03 22:36:04 +01:00
local spaces = " "
local cbc_ciphers = "ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:SRP-DSS-AES-256-CBC-SHA:SRP-RSA-AES-256-CBC-SHA:SRP-AES-256-CBC-SHA:RSA-PSK-AES256-CBC-SHA384:DHE-PSK-AES256-CBC-SHA384:DHE-PSK-AES256-CBC-SHA:ECDHE-PSK-CAMELLIA256-SHA384:RSA-PSK-CAMELLIA256-SHA384:DHE-PSK-CAMELLIA256-SHA384:PSK-AES256-CBC-SHA384:PSK-CAMELLIA256-SHA384:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA256:DH-RSA-AES256-SHA256:DH-DSS-AES256-SHA256:DHE-RSA-AES256-SHA:DHE-DSS-AES256-SHA:DH-RSA-AES256-SHA:DH-DSS-AES256-SHA:ECDHE-RSA-CAMELLIA256-SHA384:ECDHE-ECDSA-CAMELLIA256-SHA384:DHE-RSA-CAMELLIA256-SHA256:DHE-DSS-CAMELLIA256-SHA256:DH-RSA-CAMELLIA256-SHA256:DH-DSS-CAMELLIA256-SHA256:DHE-RSA-CAMELLIA256-SHA:DHE-DSS-CAMELLIA256-SHA:DH-RSA-CAMELLIA256-SHA:DH-DSS-CAMELLIA256-SHA:AECDH-AES256-SHA:ADH-AES256-SHA256:ADH-AES256-SHA:ADH-CAMELLIA256-SHA256:ADH-CAMELLIA256-SHA:ECDH-RSA-AES256-SHA384:ECDH-ECDSA-AES256-SHA384:ECDH-RSA-AES256-SHA:ECDH-ECDSA-AES256-SHA:ECDH-RSA-CAMELLIA256-SHA384:ECDH-ECDSA-CAMELLIA256-SHA384:AES256-SHA256:AES256-SHA:CAMELLIA256-SHA256:ECDHE-PSK-AES256-CBC-SHA384:ECDHE-PSK-AES256-CBC-SHA:CAMELLIA256-SHA:RSA-PSK-AES256-CBC-SHA:PSK-AES256-CBC-SHA:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:SRP-DSS-AES-128-CBC-SHA:SRP-RSA-AES-128-CBC-SHA:SRP-AES-128-CBC-SHA:DHE-RSA-AES128-SHA256:DHE-DSS-AES128-SHA256:DH-RSA-AES128-SHA256:DH-DSS-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA:DH-RSA-AES128-SHA:DH-DSS-AES128-SHA:ECDHE-RSA-CAMELLIA128-SHA256:ECDHE-ECDSA-CAMELLIA128-SHA256:DHE-RSA-CAMELLIA128-SHA256:DHE-DSS-CAMELLIA128-SHA256:DH-RSA-CAMELLIA128-SHA256:DH-DSS-CAMELLIA128-SHA256:DHE-RSA-SEED-SHA:DHE-DSS-SEED-SHA:DH-RSA-SEED-SHA:DH-DSS-SEED-SHA:DHE-RSA-CAMELLIA128-SHA:DHE-DSS-CAMELLIA128-SHA:DH-RSA-CAMELLIA128-SHA:DH-DSS-CAMELLIA128-SHA:AECDH-AES128-SHA:ADH-AES128-SHA256:ADH-AES128-SHA:ADH-CAMELLIA128-SHA256:ADH-SEED-SHA:ADH-CAMELLIA128-SHA:ECDH-RSA-AES128-SHA256:ECDH-ECDSA-AES128-SHA256:ECDH-RSA-AES128-SHA:ECDH-ECDSA-AES128-SHA:ECDH-RSA-CAMELLIA128-SHA256:ECDH-ECDSA-CAMELLIA128-SHA256:AES128-SHA256:AES128-SHA:CAMELLIA128-SHA256:ECDHE-PSK-AES128-CBC-SHA256:ECDHE-PSK-AES128-CBC-SHA:RSA-PSK-AES128-CBC-SHA256:DHE-PSK-AES128-CBC-SHA256:DHE-PSK-AES128-CBC-SHA:SEED-SHA:CAMELLIA128-SHA:ECDHE-PSK-CAMELLIA128-SHA256:RSA-PSK-CAMELLIA128-SHA256:DHE-PSK-CAMELLIA128-SHA256:PSK-AES128-CBC-SHA256:PSK-CAMELLIA128-SHA256:IDEA-CBC-SHA:RSA-PSK-AES128-CBC-SHA:PSK-AES128-CBC-SHA:KRB5-IDEA-CBC-SHA:KRB5-IDEA-CBC-MD5:ECDHE-RSA-DES-CBC3-SHA:ECDHE-ECDSA-DES-CBC3-SHA:SRP-DSS-3DES-EDE-CBC-SHA:SRP-RSA-3DES-EDE-CBC-SHA:SRP-3DES-EDE-CBC-SHA:EDH-RSA-DES-CBC3-SHA:EDH-DSS-DES-CBC3-SHA:DH-RSA-DES-CBC3-SHA:DH-DSS-DES-CBC3-SHA:AECDH-DES-CBC3-SHA:ADH-DES-CBC3-SHA:ECDH-RSA-DES-CBC3-SHA:ECDH-ECDSA-DES-CBC3-SHA:DES-CBC3-SHA:RSA-PSK-3DES-EDE-CBC-SHA:PSK-3DES-EDE-CBC-SHA:KRB5-DES-CBC3-SHA:KRB5-DES-CBC3-MD5:ECDHE-PSK-3DES-EDE-CBC-SHA:DHE-PSK-3DES-EDE-CBC-SHA:EXP1024-DHE-DSS-DES-CBC-SHA:EDH-RSA-DES-CBC-SHA:EDH-DSS-DES-CBC-SHA:DH-RSA-DES-CBC-SHA:DH-DSS-DES-CBC-SHA:ADH-DES-CBC-SHA:EXP1024-DES-CBC-SHA:DES-CBC-SHA:KRB5-DES-CBC-SHA:KRB5-DES-CBC-MD5:EXP-EDH-RSA-DES-CBC-SHA:EXP-EDH-DSS-DES-CBC-SHA:EXP-ADH-DES-CBC-SHA:EXP-DES-CBC-SHA:EXP-RC2-CBC-MD5:EXP-KRB5-RC2-CBC-SHA:EXP-KRB5-DES-CBC-SHA:EXP-KRB5-RC2-CBC-MD5:EXP-KRB5-DES-CBC-MD5:EXP-DH-DSS-DES-CBC-SHA:EXP-DH-RSA-DES-CBC-SHA"
cbc_ciphers_hex = "c0,28, c0,24, c0,14, c0,0a, c0,22, c0,21, c0,20, 00,b7, 00,b3, 00,91, c0,9b, c0,99, c0,97, 00,af, c0,95, 00,6b, 00,6a, 00,69, 00,68, 00,39, 00,38, 00,37, 00,36, c0,77, c0,73, 00,c4, 00,c3, 00,c2, 00,c1, 00,88, 00,87, 00,86, 00,85, c0,19, 00,6d, 00,3a, 00,c5, 00,89, c0,2a, c0,26, c0,0f, c0,05, c0,79, c0,75, 00,3d, 00,35, 00,c0, c0,38, c0,36, 00,84, 00,95, 00,8d, c0,3d, c0,3f, c0,41, c0,43, c0,45, c0,47, c0,49, c0,4b, c0,4d, c0,4f, c0,65, c0,67, c0,69, c0,71, c0,27, c0,23, c0,13, c0,09, c0,1f, c0,1e, c0,1d, 00,67, 00,40, 00,3f, 00,3e, 00,33, 00,32, 00,31, 00,30, c0,76, c0,72, 00,be, 00,bd, 00,bc, 00,bb, 00,9a, 00,99, 00,98, 00,97, 00,45, 00,44, 00,43, 00,42, c0,18, 00,6c, 00,34, 00,bf, 00,9b, 00,46, c0,29, c0,25, c0,0e, c0,04, c0,78, c0,74, 00,3c, 00,2f, 00,ba, c0,37, c0,35, 00,b6, 00,b2, 00,90, 00,96, 00,41, c0,9a, c0,98, c0,96, 00,ae, c0,94, 00,07, 00,94, 00,8c, 00,21, 00,25, c0,3c, c0,3e, c0,40, c0,42, c0,44, c0,46, c0,48, c0,4a, c0,4c, c0,4e, c0,64, c0,66, c0,68, c0,70, c0,12, c0,08, c0,1c, c0,1b, c0,1a, 00,16, 00,13, 00,10, 00,0d, c0,17, 00,1b, c0,0d, c0,03, 00,0a, 00,93, 00,8b, 00,1f, 00,23, c0,34, 00,8f, fe,ff, ff,e0, 00,63, 00,15, 00,12, 00,0f, 00,0c, 00,1a, 00,62, 00,09, 00,1e, 00,22, fe,fe, ff,e1, 00,14, 00,11, 00,19, 00,08, 00,06, 00,27, 00,26, 00,2a, 00,29, 00,0b, 00,0e"
     #FIXME: we have 154 ciphers here, some devices can only take 128 ciphers!
     local has_dh_bits="$HAS_DH_BITS"
     local -i nr_supported_ciphers=0
     local using_sockets=true
     local cve="CVE-2013-0169"
     local cwe="CWE-310"
     local hint=""

     [[ $VULN_COUNT -le $VULN_THRESHLD ]] && outln && pr_headlineln " Testing for LUCKY13 vulnerability " && outln
     if [[ $VULN_COUNT -le $VULN_THRESHLD ]] || "$WIDE"; then
          outln
     fi
     pr_bold " LUCKY13"; out " ($cve), experimental "

     "$SSL_NATIVE" && using_sockets=false
     # The openssl binary distributed has almost everything we need (PSK and KRB5 ciphers and feff, ffe0 are typically missing).
     # Measurements show that there's little impact whether we use sockets or openssl here, so the default is sockets here.
     if "$using_sockets"; then
          tls_sockets "03" "${cbc_ciphers_hex}"
          sclient_success=$?
          [[ "$sclient_success" -eq 2 ]] && sclient_success=0
     else
          nr_cbc_ciphers=$(count_ciphers $cbc_ciphers)
          nr_supported_ciphers=$(count_ciphers $(actually_supported_ciphers $cbc_ciphers))
          $OPENSSL s_client $(s_client_options "$STARTTLS $BUGS -cipher $cbc_ciphers -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
          sclient_connect_successful $? $TMPFILE
          sclient_success=$?
          [[ "$DEBUG" -eq 2 ]] && egrep -a "error|failure" $ERRFILE | egrep -av "unable to get local|verify error"
     fi
     if [[ $sclient_success -eq 0 ]]; then
          out "potentially "
          pr_svrty_low "VULNERABLE"; out ", uses cipher block chaining (CBC) ciphers with TLS"
          fileout "lucky13" "LOW" "potentially vulnerable to LUCKY13, uses cipher block chaining (CBC) ciphers with TLS. Check patches" "$cve" "$cwe" "$hint"
          # The CBC padding which led to timing differences during MAC processing has been solved in openssl (https://www.openssl.org/news/secadv/20130205.txt)
          # and other software. However we can't tell that with reasonable effort from the outside. Thus we still issue a warning and label it experimental.
     else
          pr_done_best "not vulnerable (OK)"
          if "$using_sockets"; then
               fileout "lucky13" "OK" "LUCKY13: not vulnerable" "$cve" "$cwe"
          else
               if [[ "$nr_supported_ciphers" -ge 133 ]]; then
                    # Likely only PSK/KRB5 ciphers are missing: display the discrepancy but no warning
                    out ", $nr_supported_ciphers/$nr_cbc_ciphers local ciphers"
               else
                    pr_warning ", $nr_supported_ciphers/$nr_cbc_ciphers local ciphers"
               fi
               fileout "lucky13" "OK" "LUCKY13: not vulnerable ($nr_supported_ciphers of $nr_cbc_ciphers local ciphers)" "$cve" "$cwe"
          fi
     fi
     outln
     tmpfile_handle $FUNCNAME.txt
     return $sclient_success
}
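
# A standalone, hedged sketch (not called anywhere by testssl.sh; HOST:PORT is a
# placeholder to substitute): the gist of the check above can be reproduced manually
# by offering only a few CBC/HMAC suites via plain openssl and looking at what,
# if anything, the server negotiates.
quick_cbc_check_sketch() {
     # If the "Cipher" line below names a suite (rather than 0000), the server still
     # accepts CBC with TLS, and LUCKY13 exposure then depends on how well its TLS stack is patched.
     echo | openssl s_client -cipher 'ECDHE-RSA-AES128-SHA:AES128-SHA:DES-CBC3-SHA' \
          -connect HOST:PORT 2>/dev/null | grep -E '^ *(Protocol|Cipher) *:'
}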
# https://tools.ietf.org/html/rfc7465 REQUIRES that TLS clients and servers NEVER negotiate the use of RC4 cipher suites!
# https://en.wikipedia.org/wiki/Transport_Layer_Security#RC4_attacks
# http://blog.cryptographyengineering.com/2013/03/attack-of-week-rc4-is-kind-of-broken-in.html
run_rc4() {
     local -i rc4_offered=0
     local -i nr_ciphers=0 nr_ossl_ciphers=0 nr_nonossl_ciphers=0 ret
     local n auth mac export hexc sslv2_ciphers_hex="" sslv2_ciphers_ossl="" s
     local -a normalized_hexcode hexcode ciph sslvers kx enc export2 sigalg ossl_supported
     local -i i
     local -a ciphers_found ciphers_found2 hexcode2 ciph2 sslvers2 rfc_ciph2
     local -i -a index
     local dhlen available="" ciphers_to_test supported_sslv2_ciphers proto
     local has_dh_bits="$HAS_DH_BITS" rc4_detected=""
     local using_sockets=true
     local cve="CVE-2013-2566, CVE-2015-2808"
     local cwe="CWE-310"
     local hint=""

     "$SSL_NATIVE" && using_sockets=false
     "$FAST" && using_sockets=false
     [[ $TLS_NR_CIPHERS == 0 ]] && using_sockets=false

     if [[ $VULN_COUNT -le $VULN_THRESHLD ]]; then
          outln
          pr_headlineln " Checking for vulnerable RC4 Ciphers "
     fi
     if [[ $VULN_COUNT -le $VULN_THRESHLD ]]; then
          outln
     fi
     pr_bold " RC4"; out " ($cve) "

     # get a list of all the cipher suites to test
     if "$using_sockets" || [[ $OSSL_VER_MAJOR -lt 1 ]]; then
          for (( i=0; i < TLS_NR_CIPHERS; i++ )); do
               if [[ "${TLS_CIPHER_RFC_NAME[i]}" =~ RC4 ]] && ( "$using_sockets" || "${TLS_CIPHER_OSSL_SUPPORTED[i]}" ); then
                    hexc="$(tolower "${TLS_CIPHER_HEXCODE[i]}")"
                    ciph[nr_ciphers]="${TLS_CIPHER_OSSL_NAME[i]}"
                    rfc_ciph[nr_ciphers]="${TLS_CIPHER_RFC_NAME[i]}"
                    sslvers[nr_ciphers]="${TLS_CIPHER_SSLVERS[i]}"
                    kx[nr_ciphers]="${TLS_CIPHER_KX[i]}"
                    enc[nr_ciphers]="${TLS_CIPHER_ENC[i]}"
                    export2[nr_ciphers]="${TLS_CIPHER_EXPORT[i]}"
                    ciphers_found[nr_ciphers]=false
                    sigalg[nr_ciphers]=""
                    ossl_supported[nr_ciphers]="${TLS_CIPHER_OSSL_SUPPORTED[i]}"
                    if "$using_sockets" && "$WIDE" && ! "$HAS_DH_BITS" &&
                         ( [[ ${kx[nr_ciphers]} == "Kx=ECDH" ]] || [[ ${kx[nr_ciphers]} == "Kx=DH" ]] || [[ ${kx[nr_ciphers]} == "Kx=EDH" ]] ); then
                         ossl_supported[nr_ciphers]=false
                    fi
                    if [[ ${#hexc} -eq 9 ]]; then
                         hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2}"
                         if [[ "${hexc:2:2}" == "00" ]]; then
                              normalized_hexcode[nr_ciphers]="x${hexc:7:2}"
                         else
                              normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}"
                         fi
                    else
                         hexcode[nr_ciphers]="${hexc:2:2},${hexc:7:2},${hexc:12:2}"
                         normalized_hexcode[nr_ciphers]="x${hexc:2:2}${hexc:7:2}${hexc:12:2}"
                         sslv2_ciphers_hex+=", ${hexcode[nr_ciphers]}"
                         sslv2_ciphers_ossl+=":${ciph[nr_ciphers]}"
                    fi
                    nr_ciphers+=1
               fi
          done
     else
          while read hexc n ciph[nr_ciphers] sslvers[nr_ciphers] kx[nr_ciphers] auth enc[nr_ciphers] mac export2[nr_ciphers]; do
               if [[ "${ciph[nr_ciphers]}" =~ RC4 ]]; then
                    ciphers_found[nr_ciphers]=false
                    if [[ ${#hexc} -eq 9 ]]; then
                         if [[ "${hexc:2:2}" == "00" ]]; then
                              normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:7:2}")"
                         else
                              normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:2:2}${hexc:7:2}")"
                         fi
                    else
                         normalized_hexcode[nr_ciphers]="$(tolower "x${hexc:2:2}${hexc:7:2}${hexc:12:2}")"
                         sslv2_ciphers_ossl+=":${ciph[nr_ciphers]}"
                    fi
                    sigalg[nr_ciphers]=""
                    ossl_supported[nr_ciphers]=true
                    nr_ciphers+=1
               fi
          done < <($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>>$ERRFILE)
     fi

     if "$using_sockets" && [[ -n "$sslv2_ciphers_hex" ]]; then
          sslv2_sockets "${sslv2_ciphers_hex:2}" "true"
          if [[ $? -eq 3 ]] && [[ "$V2_HELLO_CIPHERSPEC_LENGTH" -ne 0 ]]; then
               supported_sslv2_ciphers="$(grep "Supported cipher: " "$TEMPDIR/$NODEIP.parse_sslv2_serverhello.txt")"
               "$WIDE" && "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$HOSTCERT")"
               for (( i=0; i < nr_ciphers; i++ )); do
                    if [[ "${sslvers[i]}" == "SSLv2" ]] && [[ "$supported_sslv2_ciphers" =~ "${normalized_hexcode[i]}" ]]; then
                         ciphers_found[i]=true
                         "$WIDE" && "$SHOW_SIGALGO" && sigalg[i]="$s"
                         rc4_offered=1
                    fi
               done
          fi
     elif "$HAS_SSL2" && [[ -n "$sslv2_ciphers_ossl" ]]; then
          $OPENSSL s_client -cipher "${sslv2_ciphers_ossl:1}" $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY -ssl2 >$TMPFILE 2>$ERRFILE </dev/null
          sclient_connect_successful "$?" "$TMPFILE"
          if [[ "$?" -eq 0 ]]; then
               supported_sslv2_ciphers="$(grep -A 4 "Ciphers common between both SSL endpoints:" $TMPFILE)"
               "$WIDE" && "$SHOW_SIGALGO" && s="$(read_sigalg_from_file "$TMPFILE")"
               for (( i=0; i < nr_ciphers; i++ )); do
                    if [[ "${sslvers[i]}" == "SSLv2" ]] && [[ "$supported_sslv2_ciphers" =~ "${ciph[i]}" ]]; then
                         ciphers_found[i]=true
                         "$WIDE" && "$SHOW_SIGALGO" && sigalg[i]="$s"
                         rc4_offered=1
                    fi
               done
          fi
     fi

     for (( i=0; i < nr_ciphers; i++ )); do
          if "${ossl_supported[i]}" && [[ "${sslvers[i]}" != "SSLv2" ]]; then
               ciphers_found2[nr_ossl_ciphers]=false
               sslvers2[nr_ossl_ciphers]="${sslvers[i]}"
               ciph2[nr_ossl_ciphers]="${ciph[i]}"
               index[nr_ossl_ciphers]=$i
               nr_ossl_ciphers+=1
          fi
     done

     for proto in -no_ssl2 -tls1_1 -tls1 -ssl3; do
          [[ "$proto" != "-no_ssl2" ]] && [[ $(has_server_protocol "${proto:1}") -eq 1 ]] && continue
          ! "$HAS_SSL3" && [[ "$proto" == "-ssl3" ]] && continue
          while true; do
               ciphers_to_test=""
               for (( i=0; i < nr_ossl_ciphers; i++ )); do
                    ! "${ciphers_found2[i]}" && ciphers_to_test+=":${ciph2[i]}"
               done
               [[ -z "$ciphers_to_test" ]] && break
               $OPENSSL s_client $(s_client_options "$proto -cipher ${ciphers_to_test:1} $STARTTLS $BUGS -connect $NODEIP:$PORT $PROXY $SNI") >$TMPFILE 2>$ERRFILE </dev/null
               sclient_connect_successful "$?" "$TMPFILE" || break
               cipher=$(get_cipher $TMPFILE)
               [[ -z "$cipher" ]] && break
               for (( i=0; i < nr_ossl_ciphers; i++ )); do
                    [[ "$cipher" == "${ciph2[i]}" ]] && ciphers_found2[i]=true && break
               done
               [[ $i -eq $nr_ossl_ciphers ]] && break
               rc4_offered=1
               i=${index[i]}
               ciphers_found[i]=true
               if "$WIDE" && ( [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]] ); then
                    dhlen=$(read_dhbits_from_file "$TMPFILE" quiet)
                    kx[i]="${kx[i]} $dhlen"
               fi
               "$WIDE" && "$SHOW_SIGALGO" && grep -q "\-\-\-\-\-BEGIN CERTIFICATE\-\-\-\-\-" $TMPFILE && \
                    sigalg[i]="$(read_sigalg_from_file "$TMPFILE")"
          done
     done

     if "$using_sockets"; then
          for (( i=0; i < nr_ciphers; i++ )); do
               if ! "${ciphers_found[i]}" && [[ "${sslvers[i]}" != "SSLv2" ]]; then
                    ciphers_found2[nr_nonossl_ciphers]=false
                    sslvers2[nr_nonossl_ciphers]="${sslvers[i]}"
                    hexcode2[nr_nonossl_ciphers]="${hexcode[i]}"
                    rfc_ciph2[nr_nonossl_ciphers]="${rfc_ciph[i]}"
                    index[nr_nonossl_ciphers]=$i
                    nr_nonossl_ciphers+=1
               fi
          done
     fi

     for proto in 03 02 01 00; do
          [[ $(has_server_protocol "$proto") -eq 1 ]] && continue
          while true; do
               ciphers_to_test=""
               for (( i=0; i < nr_nonossl_ciphers; i++ )); do
                    ! "${ciphers_found2[i]}" && ciphers_to_test+=", ${hexcode2[i]}"
               done
               [[ -z "$ciphers_to_test" ]] && break
               if "$WIDE" && "$SHOW_SIGALGO"; then
                    tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "all"
               else
                    tls_sockets "$proto" "${ciphers_to_test:2}, 00,ff" "ephemeralkey"
               fi
               ret=$?
               [[ $ret -ne 0 ]] && [[ $ret -ne 2 ]] && break
               cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
               for (( i=0; i < nr_nonossl_ciphers; i++ )); do
                    [[ "$cipher" == "${rfc_ciph2[i]}" ]] && ciphers_found2[i]=true && break
               done
               [[ $i -eq $nr_nonossl_ciphers ]] && break
               rc4_offered=1
               i=${index[i]}
               ciphers_found[i]=true
               if "$WIDE" && ( [[ ${kx[i]} == "Kx=ECDH" ]] || [[ ${kx[i]} == "Kx=DH" ]] || [[ ${kx[i]} == "Kx=EDH" ]] ); then
                    dhlen=$(read_dhbits_from_file "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" quiet)
                    kx[i]="${kx[i]} $dhlen"
               fi
               "$WIDE" && "$SHOW_SIGALGO" && [[ -r "$HOSTCERT" ]] && \
                    sigalg[i]="$(read_sigalg_from_file "$HOSTCERT")"
          done
     done

     if [[ $rc4_offered -eq 1 ]]; then
          "$WIDE" || pr_svrty_high "VULNERABLE (NOT ok): "
          if "$WIDE"; then
               outln "\n"
               neat_header
          fi
          for (( i=0; i < nr_ciphers; i++ )); do
               if ! "${ciphers_found[i]}" && ! "$SHOW_EACH_C"; then
                    continue            # no successful connect AND not verbose displaying each cipher
               fi
               if "$WIDE"; then
                    #FIXME: JSON+CSV in wide mode is missing
                    export="${export2[i]}"
                    neat_list "${normalized_hexcode[i]}" "${ciph[i]}" "${kx[i]}" "${enc[i]}" "${ciphers_found[i]}"
                    if "$SHOW_EACH_C"; then
                         if "${ciphers_found[i]}"; then
                              pr_svrty_high "available"
                         else
                              pr_deemphasize "not a/v"
                         fi
                    fi
                    outln "${sigalg[i]}"
               fi
               if "${ciphers_found[i]}"; then
                    if ( [[ "$DISPLAY_CIPHERNAMES" =~ openssl ]] && [[ "${ciph[i]}" != "-" ]] ) || [[ "${rfc_ciph[i]}" == "-" ]]; then
                         rc4_detected+="${ciph[i]} "
                    else
                         rc4_detected+="${rfc_ciph[i]} "
                    fi
               fi
          done
          ! "$WIDE" && pr_svrty_high "$(out_row_aligned_max_width "$rc4_detected" " " $TERM_WIDTH)"
          outln
          "$WIDE" && pr_svrty_high "VULNERABLE (NOT ok)"
          fileout "rc4" "HIGH" "RC4: VULNERABLE, Detected ciphers: $rc4_detected" "$cve" "$cwe" "$hint"
     elif [[ $nr_ciphers -eq 0 ]]; then
          prln_local_problem "No RC4 Ciphers configured in $OPENSSL"
          fileout "rc4" "WARN" "RC4 ciphers not supported by local OpenSSL ($OPENSSL)"
     else
          prln_done_good "no RC4 ciphers detected (OK)"
          fileout "rc4" "OK" "RC4: not vulnerable" "$cve" "$cwe"
     fi
     outln

     "$using_sockets" && HAS_DH_BITS="$has_dh_bits"
     tmpfile_handle $FUNCNAME.txt
     return $rc4_offered
}
run_youknowwho() {
     local cve="CVE-2013-2566"
     # CVE-2013-2566,
     # NOT FIXME as there's no code: http://www.isg.rhul.ac.uk/tls/
     # http://blog.cryptographyengineering.com/2013/03/attack-of-week-rc4-is-kind-of-broken-in.html
     return 0
     # in a nutshell: don't use RC4, really not!
}

# https://www.usenix.org/conference/woot13/workshop-program/presentation/smyth
# https://secure-resumption.com/tlsauth.pdf
run_tls_truncation() {
     #FIXME: difficult to test, is there any test available? Please let me know
     :
}

# Test for various server implementation errors that aren't tested for elsewhere.
# Inspired by https://datatracker.ietf.org/doc/draft-ietf-tls-grease.
run_grease() {
     local -i success
     local bug_found=false
     local normal_hello_ok=false
     local cipher_list proto selected_cipher selected_cipher_hex="" extn rnd_bytes
     local alpn_proto alpn alpn_list_len_hex extn_len_hex
     local selected_alpn_protocol grease_selected_alpn_protocol
     local ciph list temp curve_found
     local -i i j rnd alpn_list_len extn_len
     # Note: The following values were taken from https://datatracker.ietf.org/doc/draft-ietf-tls-grease.
     # These arrays may need to be updated if the values change in the final version of this document.
     local -a -r grease_cipher_suites=( "0a,0a" "1a,1a" "2a,2a" "3a,3a" "4a,4a" "5a,5a" "6a,6a" "7a,7a" "8a,8a" "9a,9a" "aa,aa" "ba,ba" "ca,ca" "da,da" "ea,ea" "fa,fa" )
     local -a -r grease_supported_groups=( "0a,0a" "1a,1a" "2a,2a" "3a,3a" "4a,4a" "5a,5a" "6a,6a" "7a,7a" "8a,8a" "9a,9a" "aa,aa" "ba,ba" "ca,ca" "da,da" "ea,ea" "fa,fa" )
     local -a -r grease_extn_values=( "0a,0a" "1a,1a" "2a,2a" "3a,3a" "4a,4a" "5a,5a" "6a,6a" "7a,7a" "8a,8a" "9a,9a" "aa,aa" "ba,ba" "ca,ca" "da,da" "ea,ea" "fa,fa" )
local -r ecdhe_ciphers = "cc,14, cc,13, c0,30, c0,2c, c0,28, c0,24, c0,14, c0,0a, c0,9b, cc,a9, cc,a8, c0,af, c0,ad, c0,77, c0,73, c0,19, cc,ac, c0,38, c0,36, c0,49, c0,4d, c0,5d, c0,61, c0,71, c0,87, c0,8b, c0,2f, c0,2b, c0,27, c0,23, c0,13, c0,09, c0,ae, c0,ac, c0,76, c0,72, c0,18, c0,37, c0,35, c0,9a, c0,48, c0,4c, c0,5c, c0,60, c0,70, c0,86, c0,8a, c0,11, c0,07, c0,16, c0,33, c0,12, c0,08, c0,17, c0,34, c0,10, c0,06, c0,15, c0,3b, c0,3a, c0,39"

     outln; pr_headline " Testing for server implementation bugs "; outln "\n"

     # Many of the following checks work by modifying the "basic" call to
     # tls_sockets() and assuming the tested-for bug is present if the
     # connection fails. However, this only works if the connection succeeds
     # with the "basic" call. So, keep trying different "basic" calls until
     # one is found that succeeds.
     for (( i=0; i < 5; i++ )); do
          case $i in
               0) proto="03" ; cipher_list="$TLS12_CIPHER" ;;
               1) proto="03" ; cipher_list="$TLS12_CIPHER_2ND_TRY" ;;
               2) proto="02" ; cipher_list="$TLS_CIPHER" ;;
               3) proto="01" ; cipher_list="$TLS_CIPHER" ;;
               4) proto="00" ; cipher_list="$TLS_CIPHER" ;;
          esac
          tls_sockets "$proto" "$cipher_list"
          success=$?
          if [[ $success -eq 0 ]] || [[ $success -eq 2 ]]; then
               break
          fi
     done
     if [[ $success -eq 0 ]] || [[ $success -eq 2 ]]; then
          selected_cipher=$(get_cipher "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
          if [[ $TLS_NR_CIPHERS -ne 0 ]]; then
               for (( i=0; i < TLS_NR_CIPHERS; i++ )); do
                    [[ "$selected_cipher" == "${TLS_CIPHER_RFC_NAME[i]}" ]] && selected_cipher_hex="${TLS_CIPHER_HEXCODE[i]}" && break
               done
          elif "$HAS_SSL2"; then
               selected_cipher_hex="$($OPENSSL ciphers -V -tls1 'ALL:COMPLEMENTOFALL' | awk '/'"$selected_cipher"'/ { print $1 }')"
          else
               selected_cipher_hex="$($OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL' | awk '/'"$selected_cipher"'/ { print $1 }')"
          fi
          if [[ -n "$selected_cipher_hex" ]]; then
               normal_hello_ok=true
               selected_cipher_hex="${selected_cipher_hex:2:2},${selected_cipher_hex:7:2}"
          fi
     else
          proto="03"
     fi

     # Test for yaSSL bug - server only looks at second byte of each cipher
     # suite listed in ClientHello (see issue #793). First check to see if
     # server ignores the ciphers in the ClientHello entirely, then check to
     # see if server only looks at second byte of each offered cipher.

     # Send a list of non-existent ciphers where the second byte does not match
     # any existing cipher.
     debugme echo -e "\nSending ClientHello with non-existent ciphers."
     tls_sockets "$proto" "de,d0, de,d1, d3,d2, de,d3, 00,ff"
     success=$?
     if [[ $success -eq 0 ]] || [[ $success -eq 2 ]]; then
          prln_svrty_medium " Server claims to support non-existent cipher suite."
          fileout "grease" "CRITICAL" "Server claims to support non-existent cipher suite."
          bug_found=true
     elif grep -q "The ServerHello specifies a cipher suite that wasn't included in the ClientHello" "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt"; then
          prln_svrty_medium " Server responded with a ServerHello rather than an alert even though it doesn't support any of the client-offered cipher suites."
          fileout "grease" "CRITICAL" "Server responded with a ServerHello rather than an alert even though it doesn't support any of the client-offered cipher suites."
          bug_found=true
     else
          # Send a list of non-existent ciphers such that for each cipher that
          # is defined, there is one in the list that matches in the second byte
          # (but make sure the list contains at most 127 ciphers).
          debugme echo -e "\nSending ClientHello with non-existent ciphers, but that match existing ciphers in second byte."
tls_sockets " $proto " "de,01, de,02, de,03, de,04, de,05, de,06, de,07, de,08, de,09, de,0a, de,0b, de,0c, de,0d, de,0e, de,0f, de,10, de,11, de,12, de,13, de,14, de,15, de,16, de,17, de,18, de,19, de,1a, de,1b, de,23, de,24, de,25, de,26, de,27, de,28, de,29, de,2a, de,2b, de,2c, de,2d, de,2e, de,2f, de,30, de,31, de,32, de,33, de,34, de,35, de,36, de,37, de,38, de,39, de,3a, de,3b, de,3c, de,3d, de,3e, de,3f, de,40, de,41, de,42, de,43, de,44, de,45, de,46, de,60, de,61, de,62, de,63, de,64, de,65, de,66, de,67, de,68, de,69, de,6a, de,6b, de,6c, de,6d, de,72, de,73, de,74, de,75, de,76, de,77, de,78, de,79, de,84, de,85, de,86, de,87, de,88, de,89, de,96, de,97, de,98, de,99, de,9a, de,9b, de,9c, de,9d, de,9e, de,9f, de,a0, de,a1, de,a2, de,a3, de,a4, de,a5, de,a6, de,a7, de,ba, de,bb, de,bc, de,bd, de,be, de,bf, de,c0, de,c1, de,c2, de,c3, de,c4, de,c5, 00,ff"
          success=$?
          if [[ $success -eq 0 ]] || [[ $success -eq 2 ]]; then
               prln_svrty_medium " Server claims to support non-existent cipher suite."
               fileout "grease" "CRITICAL" "Server claims to support non-existent cipher suite."
               bug_found=true
          elif grep -q "The ServerHello specifies a cipher suite that wasn't included in the ClientHello" "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt"; then
               prln_svrty_medium " Server only compares against second byte in each cipher suite in ClientHello."
               fileout "grease" "CRITICAL" "Server only compares against second byte in each cipher suite in ClientHello."
               bug_found=true
          fi
     fi

     # Check that server ignores unrecognized extensions
     # see https://datatracker.ietf.org/doc/draft-ietf-tls-grease
     if "$normal_hello_ok" && [[ "$proto" != "00" ]]; then
          # Try multiple different randomly-generated GREASE extensions,
          # but make the final test use a zero-length extension value, just to
          # be sure that works before testing the server with a zero-length
          # extension as the final extension.
          for (( i=1; i <= 5; i++ )); do
               # Create a random extension using one of the GREASE values.
               rnd=$RANDOM%${#grease_extn_values[@]}
               extn="${grease_extn_values[rnd]}"
               if [[ $i -eq 5 ]]; then
                    extn_len=0
               else
                    # Not sure what a good upper bound is here, but a key_share
                    # extension with an ffdhe8192 group would be over 1024 bytes.
                    extn_len=$RANDOM%1024
               fi
               extn_len_hex=$(printf "%04x" $extn_len)
               extn+=",${extn_len_hex:0:2},${extn_len_hex:2:2}"
               for (( j=0; j <= extn_len-2; j=j+2 )); do
                    rnd_bytes="$(printf "%04x" $RANDOM)"
                    extn+=",${rnd_bytes:0:2},${rnd_bytes:2:2}"
               done
               if [[ $j -lt $extn_len ]]; then
                    rnd_bytes="$(printf "%04x" $RANDOM)"
                    extn+=",${rnd_bytes:0:2}"
               fi
               if [[ $DEBUG -ge 2 ]]; then
                    echo -en "\nSending ClientHello with unrecognized extension"
                    [[ $DEBUG -ge 3 ]] && echo -n ": $extn"
                    echo ""
               fi
               tls_sockets "$proto" "$cipher_list" "" "$extn"
               success=$?
               if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
                    break
               fi
          done
          if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
               prln_svrty_medium " Server fails if ClientHello contains an unrecognized extension."
               outln "    extension used in failed test: $extn"
               fileout "grease" "CRITICAL" "Server fails if ClientHello contains an unrecognized extension: $extn"
               bug_found=true
          else
               # Check for inability to handle an empty last extension (see PR #792 and
               # https://www.ietf.org/mail-archive/web/tls/current/msg19720.html).
               # (Since this test also uses an unrecognized extension, only run this
               # test if the previous test passed, and use the final extension value
               # from that test to ensure that the only difference is the location
               # of the extension.)
               # The "extra extensions" parameter needs to include the padding and
               # heartbeat extensions, since otherwise socksend_tls_clienthello()
               # will add these extensions to the end of the ClientHello.
               debugme echo -e "\nSending ClientHello with empty last extension."
               tls_sockets "$proto" "$cipher_list" "" "
                00,0f, 00,01, 01,
                00,15, 00,56,
                00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,
                00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,
                00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,
                00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,00,
                $extn"
               success=$?
               if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
                    prln_svrty_medium " Server fails if last extension in ClientHello is empty."
                    fileout "grease" "CRITICAL" "Server fails if last extension in ClientHello is empty."
                    bug_found=true
               fi
          fi
     fi

     # Check for SERVER_SIZE_LIMIT_BUG.
     # Send a ClientHello with 129 cipher suites (including 0x00,0xff) to see
     # if adding a 129th cipher to the list causes a failure.
     if "$normal_hello_ok" && [[ "$proto" == "03" ]]; then
          debugme echo -e "\nSending ClientHello with 129 cipher suites."
          tls_sockets "$proto" "00,27, $cipher_list"
          success=$?
          if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
               prln_svrty_medium " Server fails if ClientHello includes more than 128 cipher suites."
               fileout "grease" "CRITICAL" "Server fails if ClientHello includes more than 128 cipher suites."
               SERVER_SIZE_LIMIT_BUG=true
               bug_found=true
          fi
     fi

     # Check for ClientHello size bug. According to RFC 7685 "at least one TLS
     # implementation is known to hang the connection when [a] ClientHello
     # record [with a length between 256 and 511 bytes] is received."
     # If the length of the host name is more than 75 bytes (which would make
     # $SNI more than 87 bytes), then the ClientHello would be more than 511
     # bytes if the server_name extension were included. Removing the SNI
     # extension, however, may not be an option, since the server may reject the
     # connection attempt for that reason.
     if "$normal_hello_ok" && [[ "$proto" != "00" ]] && [[ ${#SNI} -le 87 ]]; then
          # Normally socksend_tls_clienthello() will add a padding extension with a length
          # that will make the ClientHello be 512 bytes in length. Providing an "extra
          # extensions" parameter with a short padding extension prevents that.
          debugme echo -e "\nSending ClientHello with length between 256 and 511 bytes."
          tls_sockets "$proto" "$cipher_list" "" "00,15,00,01,00"
          success=$?
          if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
               prln_svrty_medium " Server fails if ClientHello is between 256 and 511 bytes in length."
               fileout "grease" "CRITICAL" "Server fails if ClientHello is between 256 and 511 bytes in length."
               bug_found=true
          fi
     fi

     # Check that server ignores unrecognized cipher suite values
     # see https://datatracker.ietf.org/doc/draft-ietf-tls-grease
     if "$normal_hello_ok"; then
          list=""
          for ciph in "${grease_cipher_suites[@]}"; do
               list+=", $ciph"
          done
          debugme echo -e "\nSending ClientHello with unrecognized cipher suite values."
          tls_sockets "$proto" "${list:2}, $selected_cipher_hex, 00,ff"
          success=$?
          if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
               prln_svrty_medium " Server fails if ClientHello contains unrecognized cipher suite values."
               fileout "grease" "CRITICAL" "Server fails if ClientHello contains unrecognized cipher suite values."
               bug_found=true
          fi
     fi

     # Check that servers that support ECDHE cipher suites ignore
     # unrecognized named group values.
     # see https://datatracker.ietf.org/doc/draft-ietf-tls-grease
     if [[ "$proto" != "00" ]]; then
          # Send a ClientHello that lists all of the ECDHE cipher suites
          tls_sockets "$proto" "$ecdhe_ciphers" "ephemeralkey"
          success=$?
          if [[ $success -eq 0 ]] || [[ $success -eq 2 ]]; then
               # Send the same ClientHello as before but with an unrecognized
               # named group value added. Make the unrecognized value the first
               # one in the list, replacing one of the values in the original list,
               # but don't replace the value that was selected by the server.
               rnd=$RANDOM%${#grease_supported_groups[@]}
               temp=$(awk -F': ' '/^Server Temp Key/ { print $2 }' "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt")
               curve_found="${temp%%,*}"
               if [[ "$curve_found" == "ECDH" ]]; then
                    curve_found="${temp#*, }"
                    curve_found="${curve_found%%,*}"
               fi
               if [[ "$curve_found" == "B-571" ]]; then
                    extn="
                     00, 0a,                  # Type: Supported Elliptic Curves, see RFC 4492
                     00, 3e, 00, 3c,          # lengths
                     ${grease_supported_groups[rnd]}, 00, 0e, 00, 19, 00, 1c, 00, 1e, 00, 0b, 00, 0c, 00, 1b,
                     00, 18, 00, 09, 00, 0a, 00, 1a, 00, 16, 00, 17, 00, 1d, 00, 08,
                     00, 06, 00, 07, 00, 14, 00, 15, 00, 04, 00, 05, 00, 12, 00, 13,
                     00, 01, 00, 02, 00, 03, 00, 0f, 00, 10, 00, 11"
               else
                    extn="
                     00, 0a,                  # Type: Supported Elliptic Curves, see RFC 4492
                     00, 3e, 00, 3c,          # lengths
                     ${grease_supported_groups[rnd]}, 00, 0d, 00, 19, 00, 1c, 00, 1e, 00, 0b, 00, 0c, 00, 1b,
                     00, 18, 00, 09, 00, 0a, 00, 1a, 00, 16, 00, 17, 00, 1d, 00, 08,
                     00, 06, 00, 07, 00, 14, 00, 15, 00, 04, 00, 05, 00, 12, 00, 13,
                     00, 01, 00, 02, 00, 03, 00, 0f, 00, 10, 00, 11"
               fi
               debugme echo -e "\nSending ClientHello with unrecognized named group value in supported_groups extension."
               tls_sockets "$proto" "$ecdhe_ciphers" "" "$extn"
               success=$?
               if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
                    prln_svrty_medium " Server fails if ClientHello contains a supported_groups extension with an unrecognized named group value (${grease_supported_groups[rnd]})."
                    fileout "grease" "CRITICAL" "Server fails if ClientHello contains a supported_groups extension with an unrecognized named group value (${grease_supported_groups[rnd]})."
                    bug_found=true
               fi
          fi
     fi

     # Check that servers that support the ALPN extension ignore
     # unrecognized ALPN values.
     # see https://datatracker.ietf.org/doc/draft-ietf-tls-grease
     if "$normal_hello_ok" && [[ -z $STARTTLS ]] && [[ "$proto" != "00" ]]; then
          for alpn_proto in $ALPN_PROTOs; do
               alpn+=",$(printf "%02x" ${#alpn_proto}),$(string_to_asciihex "$alpn_proto")"
          done
          alpn_list_len=${#alpn}/3
          alpn_list_len_hex=$(printf "%04x" $alpn_list_len)
          extn_len=$alpn_list_len+2
          extn_len_hex=$(printf "%04x" $extn_len)
          tls_sockets "$proto" "$cipher_list" "all" "00,10,${extn_len_hex:0:2},${extn_len_hex:2:2},${alpn_list_len_hex:0:2},${alpn_list_len_hex:2:2}$alpn"
          success=$?
          if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
               prln_svrty_medium " Server fails if ClientHello contains an application_layer_protocol_negotiation extension."
               fileout "grease" "CRITICAL" "Server fails if ClientHello contains an application_layer_protocol_negotiation extension."
               bug_found=true
          else
               selected_alpn_protocol="$(grep "ALPN protocol:" "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" | sed 's/ALPN protocol: //')"
               # If using a "normal" ALPN extension worked, then add an unrecognized
               # ALPN value to the beginning of the extension and try again.
               alpn_proto="ignore/$selected_alpn_protocol"
               alpn=",$(printf "%02x" ${#alpn_proto}),$(string_to_asciihex "$alpn_proto")$alpn"
               alpn_list_len=${#alpn}/3
               alpn_list_len_hex=$(printf "%04x" $alpn_list_len)
               extn_len=$alpn_list_len+2
               extn_len_hex=$(printf "%04x" $extn_len)
               debugme echo -e "\nSending ClientHello with unrecognized ALPN value in application_layer_protocol_negotiation extension."
               tls_sockets "$proto" "$cipher_list" "all" "00,10,${extn_len_hex:0:2},${extn_len_hex:2:2},${alpn_list_len_hex:0:2},${alpn_list_len_hex:2:2}$alpn"
               success=$?
               if [[ $success -ne 0 ]] && [[ $success -ne 2 ]]; then
                    prln_svrty_medium " Server fails if ClientHello contains an application_layer_protocol_negotiation extension with an unrecognized ALPN value."
                    fileout "grease" "CRITICAL" "Server fails if ClientHello contains an application_layer_protocol_negotiation extension with an unrecognized ALPN value."
                    bug_found=true
               else
                    grease_selected_alpn_protocol="$(grep "ALPN protocol:" "$TEMPDIR/$NODEIP.parse_tls_serverhello.txt" | sed 's/ALPN protocol: //')"
                    if [[ -z "$grease_selected_alpn_protocol" ]] && [[ -n "$selected_alpn_protocol" ]]; then
                         prln_svrty_medium " Server did not ignore unrecognized ALPN value in the application_layer_protocol_negotiation extension."
                         fileout "grease" "CRITICAL" "Server did not ignore unrecognized ALPN value in the application_layer_protocol_negotiation extension."
                         bug_found=true
                    elif [[ "$grease_selected_alpn_protocol" =~ ignore/ ]]; then
                         prln_svrty_medium " Server selected \"ignore/\" ALPN value in the application_layer_protocol_negotiation extension."
                         fileout "grease" "CRITICAL" "Server selected \"ignore/\" ALPN value in the application_layer_protocol_negotiation extension."
                         bug_found=true
                    fi
               fi
          fi
     fi

     # TODO: For servers that support TLSv1.3, check that servers ignore
     # an unrecognized named group value along with a corresponding
     # unrecognized key share
     # see https://www.ietf.org/mail-archive/web/tls/current/msg22322.html
     # and https://www.ietf.org/mail-archive/web/tls/current/msg22319.html

     # TODO: For servers that support TLSv1.3, check that servers ignore unrecognized
     # values in the supported_versions extension.
     # see https://www.ietf.org/mail-archive/web/tls/current/msg22319.html

     if ! "$bug_found"; then
          outln " No bugs found."
          fileout "grease" "OK" "No bugs found."
          return 0
     else
          return 1
     fi
}

old_fart() {
     out "Get precompiled bins or compile "
     pr_url "https://github.com/PeterMosmans/openssl"
     outln "."
     fileout_insert_warning "old_fart" "WARN" "Your $OPENSSL $OSSL_VER version is an old fart... . It doesn't make much sense to proceed. Get precompiled bins or compile https://github.com/PeterMosmans/openssl ."
     fatal "Your $OPENSSL $OSSL_VER version is an old fart... . It doesn't make much sense to proceed." -5
}

# try very hard to determine the install path to get hold of the mapping file and the CA bundles
# TESTSSL_INSTALL_DIR can be supplied via environment so that the cipher mapping and CA bundles can be found
# www.carbonwind.net/TLS_Cipher_Suites_Project/tls_ssl_cipher_suites_simple_table_all.htm
get_install_dir() {
     [[ -z "$TESTSSL_INSTALL_DIR" ]] && TESTSSL_INSTALL_DIR="$(dirname "${BASH_SOURCE[0]}")"

     if [[ -r "$RUN_DIR/etc/cipher-mapping.txt" ]]; then
          CIPHERS_BY_STRENGTH_FILE="$RUN_DIR/etc/cipher-mapping.txt"
          [[ -z "$TESTSSL_INSTALL_DIR" ]] && TESTSSL_INSTALL_DIR="$RUN_DIR"          # probably TESTSSL_INSTALL_DIR
     fi

     [[ -r "$TESTSSL_INSTALL_DIR/etc/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/cipher-mapping.txt"
     if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]]; then
          [[ -r "$RUN_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$RUN_DIR/cipher-mapping.txt"
          [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt"
     fi

     # we haven't found the cipher file yet...
     if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]] && type -p readlink &>/dev/null; then
          readlink -f ls &>/dev/null && \
               TESTSSL_INSTALL_DIR="$(readlink -f "$(basename "${BASH_SOURCE[0]}")")" || \
               TESTSSL_INSTALL_DIR="$(readlink "$(basename "${BASH_SOURCE[0]}")")"
          # not sure whether Darwin has -f
          TESTSSL_INSTALL_DIR="$(dirname "$TESTSSL_INSTALL_DIR" 2>/dev/null)"
          [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt"
          [[ -r "$TESTSSL_INSTALL_DIR/etc/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/cipher-mapping.txt"
     fi

     # still no cipher mapping file:
     if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]] && type -p realpath &>/dev/null; then
          TESTSSL_INSTALL_DIR="$(dirname "$(realpath "${BASH_SOURCE[0]}")")"
          CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/cipher-mapping.txt"
          [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt"
     fi

     # still no cipher mapping file (and realpath is not present):
     if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]] && type -p readlink &>/dev/null; then
          readlink -f ls &>/dev/null && \
               TESTSSL_INSTALL_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" || \
               TESTSSL_INSTALL_DIR="$(dirname "$(readlink "${BASH_SOURCE[0]}")")"
          # not sure whether Darwin has -f
          CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/etc/cipher-mapping.txt"
          [[ -r "$TESTSSL_INSTALL_DIR/cipher-mapping.txt" ]] && CIPHERS_BY_STRENGTH_FILE="$TESTSSL_INSTALL_DIR/cipher-mapping.txt"
     fi

     if [[ ! -r "$CIPHERS_BY_STRENGTH_FILE" ]]; then
          DISPLAY_CIPHERNAMES="no-rfc"
          debugme echo "$CIPHERS_BY_STRENGTH_FILE"
          prln_warning "\nATTENTION: No cipher mapping file found!"
          outln "Please note from 2.9dev on $PROG_NAME needs files in \"\$TESTSSL_INSTALL_DIR/etc/\" to function correctly."
          outln
          ignore_no_or_lame "Type \"yes\" to ignore this warning and proceed at your own risk" "yes"
          [[ $? -ne 0 ]] && exit -2
     fi

     TLS_DATA_FILE="$TESTSSL_INSTALL_DIR/etc/tls_data.txt"
     if [[ ! -r "$TLS_DATA_FILE" ]]; then
          prln_warning "\nATTENTION: No TLS data file found -- needed for socket-based handshakes"
          outln "Please note from 2.9dev on $PROG_NAME needs files in \"\$TESTSSL_INSTALL_DIR/etc/\" to function correctly."
          outln
          ignore_no_or_lame "Type \"yes\" to ignore this warning and proceed at your own risk" "yes"
          [[ $? -ne 0 ]] && exit -2
     else
          :    # see #705, in a nutshell: it's not portable to initialize a global array inside a function. Thus it'll be done in the main part below
     fi
}
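
# Usage note (illustrative example, not executed here): since TESTSSL_INSTALL_DIR can be
# supplied via the environment, an out-of-tree checkout can be pointed to explicitly so
# that etc/cipher-mapping.txt and etc/tls_data.txt are found, e.g.:
#      TESTSSL_INSTALL_DIR=/opt/testssl ./testssl.sh example.com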

test_openssl_suffix() {
     local naming_ext="$(uname).$(uname -m)"
     local uname_arch="$(uname -m)"
     local myarch_suffix=""

     [[ $uname_arch =~ 64 ]] && myarch_suffix=64 || myarch_suffix=32
     if [[ -f "$1/openssl" ]] && [[ -x "$1/openssl" ]]; then
          OPENSSL="$1/openssl"
          return 0
     elif [[ -f "$1/openssl.$naming_ext" ]] && [[ -x "$1/openssl.$naming_ext" ]]; then
          OPENSSL="$1/openssl.$naming_ext"
          return 0
     elif [[ -f "$1/openssl.$uname_arch" ]] && [[ -x "$1/openssl.$uname_arch" ]]; then
          OPENSSL="$1/openssl.$uname_arch"
          return 0
     elif [[ -f "$1/openssl$myarch_suffix" ]] && [[ -x "$1/openssl$myarch_suffix" ]]; then
          OPENSSL="$1/openssl$myarch_suffix"
          return 0
     fi
     return 1
}

find_openssl_binary() {
     local s_client_has=$TEMPDIR/s_client_has.txt
     local s_client_starttls_has=$TEMPDIR/s_client_starttls_has.txt
     local openssl_location cwd=""

     # 0. check environment variable whether it's executable
     if [[ -n "$OPENSSL" ]] && [[ ! -x "$OPENSSL" ]]; then
          prln_warning "\ncannot find specified (\$OPENSSL=$OPENSSL) binary."
          tmln_out " Looking some place else ..."
     elif [[ -x "$OPENSSL" ]]; then
          :    # 1. all ok, the supplied $OPENSSL was found and has its executable bit set -- testrun comes below
     elif [[ -e "/mnt/c/Windows/System32/bash.exe" ]] && test_openssl_suffix "$(dirname "$(type -p openssl)")"; then
          # 2. otherwise, only if on Bash on Windows, use system binaries only.
          SYSTEM2="WSL"
     elif test_openssl_suffix "$TESTSSL_INSTALL_DIR"; then
          :    # 3. otherwise try openssl in the path of testssl.sh
     elif test_openssl_suffix "$TESTSSL_INSTALL_DIR/bin"; then
          :    # 4. otherwise here, this is supposed to be the standard --platform independent path in the future!!!
     elif test_openssl_suffix "$(dirname "$(type -p openssl)")"; then
          :    # 5. we tried hard and failed, so now we use the system binaries
     fi

     # no ERRFILE initialized yet, thus we use /dev/null for stderr directly
     $OPENSSL version -a 2>/dev/null >/dev/null
     if [[ $? -ne 0 ]] || [[ ! -x "$OPENSSL" ]]; then
          fatal "cannot exec or find any openssl binary" -5
     fi

     # http://www.openssl.org/news/openssl-notes.html
     OSSL_NAME=$($OPENSSL version 2>/dev/null | awk '{ print $1 }')
     OSSL_VER=$($OPENSSL version 2>/dev/null | awk -F' ' '{ print $2 }')
     OSSL_VER_MAJOR=$(sed 's/\..*$//' <<< "$OSSL_VER")
     OSSL_VER_MINOR=$(sed -e 's/^.\.//' <<< "$OSSL_VER" | tr -d '[a-zA-Z]-')
     OSSL_VER_APPENDIX=$(tr -d '0-9.' <<< "$OSSL_VER")
     OSSL_VER_PLATFORM=$($OPENSSL version -p 2>/dev/null | sed 's/^platform: //')
     OSSL_BUILD_DATE=$($OPENSSL version -a 2>/dev/null | grep '^built' | sed -e 's/built on//' -e 's/: ... //' -e 's/: //' -e 's/ UTC//' -e 's/ +0000//' -e 's/.000000000//')

     # see #190, reverting logic: unless otherwise proved openssl has no dh bits
     case "$OSSL_VER_MAJOR.$OSSL_VER_MINOR" in
          1.0.2|1.1.0|1.1.1) HAS_DH_BITS=true ;;
     esac
     if [[ "$OSSL_NAME" =~ LibreSSL ]]; then
          [[ ${OSSL_VER//./} -ge 210 ]] && HAS_DH_BITS=true
          if "$SSL_NATIVE"; then
               outln
               pr_warning "LibreSSL in native ssl mode is not a good choice for testing INSECURE features!"
          fi
     fi

     initialize_engine

     openssl_location="$(type -p $OPENSSL)"
     [[ -n "$GIT_REL" ]] && \
          cwd="$(/bin/pwd)" || \
          cwd="$RUN_DIR"
     if [[ "$openssl_location" =~ $(/bin/pwd)/bin ]]; then
          OPENSSL_LOCATION="\$PWD/bin/$(basename "$openssl_location")"
     elif [[ "$openssl_location" =~ $cwd ]] && [[ "$cwd" != '.' ]]; then
          OPENSSL_LOCATION="${openssl_location%%$cwd}"
     else
          OPENSSL_LOCATION="$openssl_location"
     fi

     OPENSSL_NR_CIPHERS=$(count_ciphers "$($OPENSSL ciphers 'ALL:COMPLEMENTOFALL' 2>/dev/null)")

     $OPENSSL s_client -ssl2 -connect x 2>&1 | grep -aq "unknown option" || \
          HAS_SSL2=true
     $OPENSSL s_client -ssl3 -connect x 2>&1 | grep -aq "unknown option" || \
          HAS_SSL3=true
     $OPENSSL s_client -tls1_3 -connect x 2>&1 | grep -aq "unknown option" || \
          HAS_TLS13=true
     $OPENSSL s_client -no_ssl2 -connect x 2>&1 | grep -aq "unknown option" || \
          HAS_NO_SSL2=true
     $OPENSSL s_client -noservername -connect x 2>&1 | grep -aq "unknown option" || \
          HAS_NOSERVERNAME=true

     $OPENSSL s_client -help 2>$s_client_has
     $OPENSSL s_client -starttls foo 2>$s_client_starttls_has

     grep -qw '\-alpn' $s_client_has && \
          HAS_ALPN=true
     grep -qw '\-nextprotoneg' $s_client_has && \
          HAS_SPDY=true
     grep -qw '\-fallback_scsv' $s_client_has && \
          HAS_FALLBACK_SCSV=true
     grep -q '\-proxy' $s_client_has && \
          HAS_PROXY=true
     grep -q '\-xmpp' $s_client_has && \
          HAS_XMPP=true
     grep -q 'postgres' $s_client_starttls_has && \
          HAS_POSTGRES=true
     grep -q 'mysql' $s_client_starttls_has && \
          HAS_MYSQL=true

     if [[ "$OPENSSL_TIMEOUT" != "" ]]; then
          if type -p timeout >/dev/null 2>&1; then
               # there are different "timeout" implementations. Check whether --preserve-status is supported
               if timeout --help 2>/dev/null | grep -q 'preserve-status'; then
                    OPENSSL="timeout --preserve-status $OPENSSL_TIMEOUT $OPENSSL"
               else
                    OPENSSL="timeout $OPENSSL_TIMEOUT $OPENSSL"
               fi
          else
               outln
               ignore_no_or_lame " Necessary binary \"timeout\" not found. Continue without timeout? " "y"
               [[ $? -ne 0 ]] && exit -2
               unset OPENSSL_TIMEOUT
          fi
     fi

     return 0
}

check4openssl_oldfarts() {
     case "$OSSL_VER" in
          0.9.7*|0.9.6*|0.9.5*)
               # 0.9.5a was the latest in 0.9.5 and was released 2000/4/1; that'll NOT suffice for this test
               old_fart ;;
          0.9.8)
               case $OSSL_VER_APPENDIX in
                    a|b|c|d|e) old_fart;;     # no SNI!
                    # other than that we leave this for MacOSX and FreeBSD but it's a pain and likely gives false negatives/positives
               esac
               ;;
     esac
     if [[ $OSSL_VER_MAJOR -lt 1 ]]; then     ## mm: Patch for libressl
          prln_warning " Your \"$OPENSSL\" is way too old (<version 1.0)!"
          case $SYSTEM in
               *BSD|Darwin)
                    out " Please use the binary provided in \$INSTALLDIR/bin/ or from ports/brew or compile from "
                    pr_url "github.com/PeterMosmans/openssl"; outln "."
                    fileout_insert_warning "too_old_openssl" "WARN" "Your $OPENSSL $OSSL_VER version is way too old. Please use the binary provided in \$INSTALLDIR/bin/ or from ports/brew or compile from github.com/PeterMosmans/openssl ." ;;
               *)   out " Update openssl binaries or compile from "
                    pr_url "https://github.com/PeterMosmans/openssl"; outln "."
                    fileout_insert_warning "too_old_openssl" "WARN" "Update openssl binaries or compile from https://github.com/PeterMosmans/openssl ." ;;
          esac
          ignore_no_or_lame " Type \"yes\" to accept false negatives or positives" "yes"
          [[ $? -ne 0 ]] && exit -2
     fi
     outln
}

# FreeBSD needs to have /dev/fd mounted. This is a friendly hint, see #258
check_bsd_mount() {
     if [[ "$(uname)" == FreeBSD ]]; then
          if ! mount | grep -q "^devfs"; then
               outln "you seem to run $PROG_NAME in a jail. Hopefully you did \"mount -t fdescfs fdesc /dev/fd\""
          elif mount | grep '/dev/fd' | grep -q fdescfs; then
               :
          else
               fatal "You need to mount fdescfs on FreeBSD: \"mount -t fdescfs fdesc /dev/fd\"" -3
          fi
     fi
}
2015-05-17 22:43:53 +02:00
help( ) {
2015-09-17 15:30:15 +02:00
cat << EOF
2015-05-17 22:43:53 +02:00
2017-06-20 23:18:15 +02:00
" $PROG_NAME [options] <URI> " or " $PROG_NAME <options> "
2016-11-05 13:43:55 +01:00
" $PROG_NAME <options> " , where <options> is:
2015-05-17 22:43:53 +02:00
2017-04-19 19:46:54 +02:00
--help what you' re looking at
2015-06-23 12:58:40 +02:00
-b, --banner displays banner + version of $PROG_NAME
-v, --version same as previous
-V, --local pretty print all local ciphers
2016-11-05 13:43:55 +01:00
-V, --local <pattern> which local ciphers with <pattern> are available? If pattern is not a number: word match
2017-06-20 23:18:15 +02:00
<pattern> is always an ignore case word pattern of cipher hexcode or any other string in the name, kx or bits
" $PROG_NAME <URI> " , where <URI> is:
2016-11-05 13:43:55 +01:00
2017-06-20 23:18:15 +02:00
<URI> host| host:port| URL| URL:port port 443 is default, URL can only contain HTTPS protocol)
2016-11-05 13:43:55 +01:00
2017-06-20 23:18:15 +02:00
" $PROG_NAME [options] <URI> " , where [ options] is:
2015-05-17 22:43:53 +02:00
2017-06-12 18:23:55 +02:00
-t, --starttls <protocol> Does a default run against a STARTTLS enabled <protocol,
2017-06-29 23:57:32 +02:00
protocol is <ftp| smtp| pop3| imap| xmpp| telnet| ldap| postgres| mysql> ( latter 4 require supplied openssl)
2017-06-12 18:23:55 +02:00
--xmpphost <to_domain> For STARTTLS enabled XMPP it supplies the XML stream to-'' domain -- sometimes needed
--mx <domain/host> Tests MX records from high to low priority ( STARTTLS, port 25)
--file <fname| fname.gmap> Mass testing option: Reads command lines from <fname>, one line per instance.
2017-06-12 17:09:52 +02:00
Comments via # allowed, EOF signals end of <fname>. Implicitly turns on "--warnings batch".
2017-06-12 22:56:36 +02:00
Alternatively: nmap output in greppable format ( -oG) ( 1x port per line allowed)
2017-06-12 19:07:58 +02:00
--mode <serial| parallel> Mass testing to be done serial ( default) or parallel ( --parallel is shortcut for the latter)
2015-05-17 22:43:53 +02:00
2017-09-23 12:54:44 +02:00
single check as <options> ( " $PROG_NAME URI " does everything except -E and -g) :
2015-06-22 18:32:40 +02:00
-e, --each-cipher checks each local cipher remotely
-E, --cipher-per-proto checks those per protocol
2017-04-08 09:14:56 +02:00
-s, --std, --standard tests certain lists of cipher suites by strength
2015-12-27 14:51:18 +01:00
-p, --protocols checks TLS/SSL protocols ( including SPDY/HTTP2)
-S, --server-defaults displays the server' s default picks and certificate info
2015-12-29 10:05:20 +01:00
-P, --server-preference displays the server' s picks: protocol+cipher
2015-07-17 15:58:07 +02:00
-x, --single-cipher <pattern> tests matched <pattern> of ciphers
( if <pattern> not a number: word match)
2016-01-15 15:53:03 +01:00
-c, --client-simulation test client simulations, see which client negotiates with cipher and protocol
2017-04-18 23:15:32 +02:00
-h, --header, --headers tests HSTS, HPKP, server/app banner, security headers, cookie, reverse proxy, IPv4 address
2016-01-15 15:53:03 +01:00
2016-11-05 13:43:55 +01:00
-U, --vulnerable tests all ( of the following) vulnerabilities ( if applicable)
2017-06-20 23:18:15 +02:00
-H, --heartbleed tests for Heartbleed vulnerability
2015-06-23 12:58:40 +02:00
-I, --ccs, --ccs-injection tests for CCS injection vulnerability
2017-04-18 23:15:32 +02:00
-T, --ticketbleed tests for Ticketbleed vulnerability in BigIP loadbalancers
2015-06-22 18:32:40 +02:00
-R, --renegotiation tests for renegotiation vulnerabilities
2017-01-24 08:37:19 +01:00
-C, --compression, --crime tests for CRIME vulnerability ( TLS compression issue)
2017-04-18 23:15:32 +02:00
-B, --breach tests for BREACH vulnerability ( HTTP compression issue)
2015-06-22 18:32:40 +02:00
-O, --poodle tests for POODLE ( SSL) vulnerability
-Z, --tls-fallback checks TLS_FALLBACK_SCSV mitigation
2017-02-02 14:42:06 +01:00
-W, --sweet32 tests 64 bit block ciphers ( 3DES, RC2 and IDEA) : SWEET32 vulnerability
2017-02-03 22:36:04 +01:00
-A, --beast tests for BEAST vulnerability
-L, --lucky13 tests for LUCKY13
2015-06-22 18:32:40 +02:00
-F, --freak tests for FREAK vulnerability
-J, --logjam tests for LOGJAM vulnerability
2016-03-21 23:03:42 +01:00
-D, --drown tests for DROWN vulnerability
2017-04-08 09:14:56 +02:00
-f, --pfs, --fs, --nsa checks ( perfect) forward secrecy settings
2015-06-22 18:32:40 +02:00
-4, --rc4, --appelbaum which RC4 ciphers are being offered?
2017-09-23 12:54:44 +02:00
-g, --grease tests several server implementation bugs like GREASE and size limitations
2015-05-17 22:43:53 +02:00
2016-11-05 13:43:55 +01:00
tuning / connect options ( most also can be preset via environment variables) :
2016-11-15 12:59:07 +01:00
--fast omits some checks: using openssl for all ciphers ( -e) , show only first
preferred cipher
2015-11-11 11:56:32 +01:00
--bugs enables the "-bugs" option of s_client, needed e.g. for some buggy F5s
2016-10-11 22:30:30 +02:00
--assume-http if protocol check fails it assumes HTTP protocol and enforces HTTP checks
2015-08-28 00:15:51 +02:00
--ssl-native fallback to checks with OpenSSL where sockets are normally used
2016-07-06 20:23:32 +02:00
--openssl <PATH> use this openssl binary ( default: look in \$ PATH, \$ RUN_DIR of $PROG_NAME )
2017-02-15 19:40:06 +01:00
--proxy <host:port| auto> connect via the specified HTTP proxy, auto: autodetermination from \$ env ( \$ http( s) _proxy)
-6 also use IPv6. Works only with supporting OpenSSL version and IPv6 connectivity
2016-11-05 13:43:55 +01:00
--ip <ip> a) tests the supplied <ip> v4 or v6 address instead of resolving host( s) in URI
b) arg "one" means: just test the first DNS returns ( useful for multiple IPs)
-n, --nodns do not try any DNS lookup
2015-11-03 23:29:53 +01:00
--sneaky leave less traces in target logs: user agent, referer
2016-01-23 19:18:33 +01:00
2016-01-23 23:33:17 +01:00
output options ( can also be preset via environment variables) :
2016-01-23 19:18:33 +01:00
--warnings <batch| off| false> "batch" doesn' t wait for keypress, "off" or "false" skips connection warning
2016-09-27 23:38:47 +02:00
--openssl-timeout <seconds> useful to avoid hangers. <seconds> to wait before openssl connect will be terminated
2015-08-24 22:17:35 +02:00
--quiet don' t output the banner. By doing this you acknowledge usage terms normally appearing in the banner
--wide wide output for tests like RC4, BEAST. PFS also with hexcode, kx, strength, RFC name
--show-each for wide outputs: display all ciphers tested -- not only succeeded ones
2017-03-28 12:07:45 +02:00
--mapping <openssl| openssl: use the OpenSSL cipher suite name as the primary cipher suite name ( default)
rfc| rfc: use the RFC cipher suite name as the primary cipher suite name
no-openssl| no-openssl: don't display the OpenSSL cipher suite name, display RFC names only
no-rfc> no-rfc: don't display the RFC cipher suite name, display OpenSSL names only
2015-08-24 22:17:35 +02:00
--color <0| 1| 2> 0: no escape or other codes, 1: b/w escape codes, 2: color ( default)
2015-12-06 20:11:33 +01:00
--colorblind swap green and blue in the output
2016-07-08 11:15:41 +02:00
--debug <0-6> 1: screen output normal but keeps debug output in /tmp/. 2-6: see "grep -A 5 '^DEBUG=' testssl.sh"
2015-06-02 22:13:19 +02:00
2017-03-28 12:07:45 +02:00
file output options ( can also be preset via environment variables)
2017-10-26 11:46:14 +02:00
--log, --logging logs stdout to '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.log' in current working directory ( cwd)
--logfile| -oL <logfile> logs stdout to 'dir/\${NODE}-p\${port}\${YYYYMMDD-HHMM}.log' if 'logfile' is a dir, or to the specified 'logfile'
--json additional output of findings to flat JSON file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.json' in cwd
2017-10-20 16:32:57 +02:00
--jsonfile| -oj <jsonfile> additional output to the specified flat JSON file or directory, similar to --logfile
2017-10-26 11:46:14 +02:00
--json-pretty additional JSON structured output of findings to a file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.json' in cwd
2017-10-20 16:32:57 +02:00
--jsonfile-pretty| -oJ <jsonfile> additional JSON structured output to the specified file or directory, similar to --logfile
2017-11-01 09:58:52 +01:00
--csv additional output of findings to CSV file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.csv' in cwd or directory
2017-10-20 16:32:57 +02:00
--csvfile| -oC <csvfile> additional output as CSV to the specified file or directory, similar to --logfile
2017-11-01 09:58:52 +01:00
--html additional output as HTML to file '\${NODE}-p\${port}\${YYYYMMDD-HHMM}.html'
2017-10-20 16:32:57 +02:00
--htmlfile| -oH <htmlfile> additional output as HTML to the specified file or directory, similar to --logfile
2017-11-01 09:58:52 +01:00
--out( f,F) ile| -oa/-oA <fname> log to a LOG,JSON,CSV,HTML file ( see nmap) . -oA/-oa: pretty/flat JSON. "auto" uses '\${NODE}-p\${port}\${YYYYMMDD-HHMM}'
2016-11-17 23:27:27 +01:00
--hints additional hints to findings
2016-11-05 13:43:55 +01:00
--severity <severity> severities with lower level will be filtered for CSV+JSON, possible values <LOW| MEDIUM| HIGH| CRITICAL>
2017-03-27 11:37:18 +02:00
--append if <logfile>, <csvfile>, <jsonfile> or <htmlfile> exists, append rather than overwrite. Omits any header
2017-11-14 19:41:25 +01:00
--prefix <out_fname_prefix> prepend <out_fname_prefix> before '\${NODE}.' in the file names above
2016-01-23 19:18:33 +01:00
2016-11-05 13:43:55 +01:00
Options requiring a value can also be called with '=' e.g. testssl.sh -t=smtp --wide --openssl=/usr/bin/openssl <URI>.
2017-06-20 23:18:15 +02:00
<URI> always needs to be the last parameter.
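Example invocation ( illustrative only, example.com is a placeholder host, not part of the original help text) :
testssl.sh --wide --html --jsonfile-pretty=results/ --severity HIGH https://example.com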
2015-05-29 19:44:27 +02:00
2015-05-17 22:43:53 +02:00
EOF
Mass testing with command line error
There is a bug in testssl.sh that occurs if mass testing is being performed, there is an error in the command line for one of the child tests, and either a single HTML file or a single JSON file is being created.
If mass testing is being performed and `parse_cmd_line()` detects an error in the command line for one of the child tests, then it will call `help()`, which will exit the program, resulting in `cleanup()` being called. `cleanup()` will call `html_footer()` and `fileout_footer()`. Since `html_header()` and `json_header()` have not yet been called, `$HTMLHEADER` and `$JSONHEADER` will both be `true`, and so `html_footer()` and `fileout_footer()` will output HTML and JSON footers, even though no headers have been output.
This PR fixes the problem by having `help()` set `$HTMLHEADER` and `$JSONHEADER` to `false` so that no HTML or JSON footers are created.
A related problem is that if a single JSON file is being created, the parent process will insert a separator (a comma) into the JSON file between the outputs of each child process. However, if there is an error in one of the child process's command lines, then this child process will not produce any JSON output and so the JSON file will have two consecutive separators (commas), which is invalid according to http://jsonlint.com.
This PR provides a partial fix for the problem for parallel mass testing by checking whether a child process has created non-empty JSON output before adding a separator to the JSON file (a sketch of that check follows the list below). It leaves two unresolved problems:
* It does not fix the problem at all for `run_mass_testing()`, where the separator is added before the test with the command line error is run.
* It does not fix the problem for parallel mass testing for the case in which the first child test has a command line error.
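# A minimal sketch of the "separator only after non-empty child output" check described
# above; $JSONFILE stands for the single JSON target file and $child_json for the child's
# own output file -- both names are illustrative assumptions, not necessarily the script's:
#      if [[ -s "$child_json" ]]; then
#           [[ -s "$JSONFILE" ]] && echo "," >>"$JSONFILE"       # separator only between non-empty entries
#           cat "$child_json" >>"$JSONFILE"
#      fi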
2017-05-22 22:57:15 +02:00
# Set HTMLHEADER and JSONHEADER to false so that the cleanup() function won't
# try to write footers to the HTML and JSON files.
HTMLHEADER = false
JSONHEADER = false
2016-01-13 10:21:01 +01:00
#' Fix syntax highlight on sublime
Stop parent if child encounters parsing error
This PR implements the suggestion from #753 for a child process in mass testing to send a signal to the parent to exit if the child encounters an error parsing its command line. At the moment, the child only sends the signal if it encounters an error that results in the `help()` function being called, but that could easily be changed (e.g., to also send a signal if `fatal()` is called in the child process).
In the case of parallel mass testing, the cleanup function needs to call `get_next_message_testing_parallel_result()` for the child that sent the signal, since otherwise the child's error message would not be displayed. Since I cannot tell which child sent the signal, I just call `cleanup()`, which displays the output of all completed child processes. Since the child process will send the signal almost immediately after starting, it can be assumed that the process that sent the signal will be the last one that completed, and so its output will be displayed last (so it isn't hidden from the user).
Note that PR #753 is still needed, since there are still scenarios in which a child would not produce any JSON output, but the parent testssl.sh would not exit (e.g., the child process cannot open a socket to the server it is supposed to test). In addition, PR #754 would still be useful, since it would be more user friendly to catch the error in the mass testing file immediately (when possible) rather than partway through a potentially time-consuming testing process.
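# Parent-side counterpart (sketch only; where and how testssl.sh actually registers the trap may differ):
#      trap "child_error" USR1      # on USR1 from a child, child_error() below runs cleanup() and exits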
2017-05-24 23:12:18 +02:00
" $CHILD_MASS_TESTING " && kill -s USR1 $PPID
2015-09-17 15:30:15 +02:00
exit $1
2015-05-17 22:43:53 +02:00
}
maketempf( ) {
2017-03-31 12:24:25 +02:00
TEMPDIR = $( mktemp -d /tmp/testssl.XXXXXX) || exit -6
2015-09-17 15:30:15 +02:00
TMPFILE = $TEMPDIR /tempfile.txt || exit -6
if [ [ " $DEBUG " -eq 0 ] ] ; then
2016-01-23 19:18:33 +01:00
ERRFILE = "/dev/null"
2015-09-17 15:30:15 +02:00
else
ERRFILE = $TEMPDIR /errorfile.txt || exit -6
fi
HOSTCERT = $TEMPDIR /host_certificate.txt
2016-09-21 21:42:45 +02:00
}
prepare_debug( ) {
2015-09-17 15:30:15 +02:00
if [ [ $DEBUG -ne 0 ] ] ; then
cat >$TEMPDIR /environment.txt << EOF
2015-05-17 22:43:53 +02:00
2016-09-21 21:42:45 +02:00
2015-05-31 14:40:12 +02:00
CVS_REL: $CVS_REL
GIT_REL: $GIT_REL
2015-05-29 10:36:14 +02:00
2015-05-17 22:43:53 +02:00
PID: $$
2016-05-26 12:56:55 +02:00
commandline: " $CMDLINE "
2015-05-17 22:43:53 +02:00
bash version: ${ BASH_VERSINFO [0] } .${ BASH_VERSINFO [1] } .${ BASH_VERSINFO [2] }
status: ${ BASH_VERSINFO [4] }
machine: ${ BASH_VERSINFO [5] }
operating system: $SYSTEM
2017-02-08 09:08:05 +01:00
os constraint: $SYSTEM2
2015-05-17 22:43:53 +02:00
shellopts: $SHELLOPTS
2015-07-20 14:05:35 +02:00
$( $OPENSSL version -a)
2015-05-29 10:36:14 +02:00
OSSL_VER_MAJOR: $OSSL_VER_MAJOR
OSSL_VER_MINOR: $OSSL_VER_MINOR
OSSL_VER_APPENDIX: $OSSL_VER_APPENDIX
2016-03-05 21:07:49 +01:00
OSSL_BUILD_DATE: $OSSL_BUILD_DATE
OSSL_VER_PLATFORM: $OSSL_VER_PLATFORM
2015-05-17 22:43:53 +02:00
2016-05-26 12:56:55 +02:00
OPENSSL_NR_CIPHERS: $OPENSSL_NR_CIPHERS
2015-07-20 14:05:35 +02:00
OPENSSL_CONF: $OPENSSL_CONF
2015-10-11 23:07:16 +02:00
HAS_IPv6: $HAS_IPv6
HAS_SSL2: $HAS_SSL2
HAS_SSL3: $HAS_SSL3
2017-10-10 22:00:47 +02:00
HAS_TLS13: $HAS_TLS13
2016-08-24 16:14:12 +02:00
HAS_NO_SSL2: $HAS_NO_SSL2
2015-10-11 23:07:16 +02:00
HAS_SPDY: $HAS_SPDY
HAS_ALPN: $HAS_ALPN
2016-09-21 21:42:45 +02:00
HAS_FALLBACK_SCSV: $HAS_FALLBACK_SCSV
HAS_PROXY: $HAS_PROXY
HAS_XMPP: $HAS_XMPP
2016-12-08 19:54:44 +01:00
HAS_POSTGRES: $HAS_POSTGRES
2017-06-29 23:57:32 +02:00
HAS_MYSQL: $HAS_MYSQL
2015-10-11 23:07:16 +02:00
2015-05-17 22:43:53 +02:00
PATH: $PATH
PROG_NAME: $PROG_NAME
2016-10-02 18:15:13 +02:00
TESTSSL_INSTALL_DIR: $TESTSSL_INSTALL_DIR
2015-05-17 22:43:53 +02:00
RUN_DIR: $RUN_DIR
2017-02-18 13:22:17 +01:00
CIPHERS_BY_STRENGTH_FILE: $CIPHERS_BY_STRENGTH_FILE
2015-05-17 22:43:53 +02:00
CAPATH: $CAPATH
COLOR: $COLOR
2015-12-06 20:11:33 +01:00
COLORBLIND: $COLORBLIND
2016-06-23 19:42:26 +02:00
TERM_WIDTH: $TERM_WIDTH
2015-11-03 10:30:59 +01:00
INTERACTIVE: $INTERACTIVE
2015-05-29 14:12:22 +02:00
HAS_GNUDATE: $HAS_GNUDATE
2016-06-20 21:51:40 +02:00
HAS_FREEBSDDATE: $HAS_FREEBSDDATE
2015-06-17 11:33:29 +02:00
HAS_SED_E: $HAS_SED_E
2015-05-17 22:43:53 +02:00
SHOW_EACH_C: $SHOW_EACH_C
SSL_NATIVE: $SSL_NATIVE
2016-10-11 22:30:30 +02:00
ASSUME_HTTP: $ASSUME_HTTP
2015-05-17 22:43:53 +02:00
SNEAKY: $SNEAKY
DEBUG: $DEBUG
HSTS_MIN: $HSTS_MIN
HPKP_MIN: $HPKP_MIN
CLIENT_MIN_PFS: $CLIENT_MIN_PFS
DAYS2WARN1: $DAYS2WARN1
DAYS2WARN2: $DAYS2WARN2
HEADER_MAXSLEEP: $HEADER_MAXSLEEP
MAX_WAITSOCK: $MAX_WAITSOCK
HEARTBLEED_MAX_WAITSOCK: $HEARTBLEED_MAX_WAITSOCK
CCS_MAX_WAITSOCK: $CCS_MAX_WAITSOCK
USLEEP_SND: $USLEEP_SND
USLEEP_REC: $USLEEP_REC
EOF
2017-09-19 00:08:33 +02:00
type -p locale & >/dev/null && locale >>$TEMPDIR /environment.txt || echo "locale doesn't exist" >>$TEMPDIR /environment.txt
2015-09-17 15:30:15 +02:00
$OPENSSL ciphers -V 'ALL:COMPLEMENTOFALL' & >$TEMPDIR /all_local_ciphers.txt
fi
2016-09-21 21:42:45 +02:00
# see also $TEMPDIR/s_client_has.txt from find_openssl_binary
2017-03-21 12:44:03 +01:00
}
prepare_arrays( ) {
local hexc mac ossl_ciph
local ossl_supported_tls = "" ossl_supported_sslv2 = ""
2017-05-22 23:04:58 +02:00
local -i i = 0
2016-06-10 17:11:39 +02:00
2017-04-12 20:34:26 +02:00
if [ [ -e " $CIPHERS_BY_STRENGTH_FILE " ] ] ; then
2016-11-15 21:06:24 +01:00
" $HAS_SSL2 " && ossl_supported_sslv2 = " $( $OPENSSL ciphers -ssl2 -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>$ERRFILE ) "
ossl_supported_tls = " $( $OPENSSL ciphers -tls1 -V 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>$ERRFILE ) "
2017-05-22 23:04:58 +02:00
while read hexc n TLS_CIPHER_OSSL_NAME[ i] TLS_CIPHER_RFC_NAME[ i] TLS_CIPHER_SSLVERS[ i] TLS_CIPHER_KX[ i] TLS_CIPHER_AUTH[ i] TLS_CIPHER_ENC[ i] mac TLS_CIPHER_EXPORT[ i] ; do
TLS_CIPHER_HEXCODE[ i] = " $hexc "
TLS_CIPHER_OSSL_SUPPORTED[ i] = false
2016-11-15 21:06:24 +01:00
if [ [ ${# hexc } -eq 9 ] ] ; then
2017-05-16 09:45:16 +02:00
# >= SSLv3 ciphers
2016-11-15 21:06:24 +01:00
if [ [ $OSSL_VER_MAJOR -lt 1 ] ] ; then
2017-05-22 23:04:58 +02:00
[ [ " : ${ ossl_supported_tls } : " = ~ " : ${ TLS_CIPHER_OSSL_NAME [i] } : " ] ] && TLS_CIPHER_OSSL_SUPPORTED[ i] = true
2016-11-15 21:06:24 +01:00
else
2017-07-20 19:13:06 +02:00
ossl_ciph = " $( awk '/' " $hexc " '/ { print $3 }' <<< " $ossl_supported_tls " ) "
2016-11-15 21:06:24 +01:00
if [ [ -n " $ossl_ciph " ] ] ; then
2017-05-22 23:04:58 +02:00
TLS_CIPHER_OSSL_SUPPORTED[ i] = true
[ [ " $ossl_ciph " != " ${ TLS_CIPHER_OSSL_NAME [i] } " ] ] && TLS_CIPHER_OSSL_NAME[ i] = " $ossl_ciph "
2016-11-15 21:06:24 +01:00
fi
Faster version of run_allciphers()
This PR speeds up the implementation of `run_allciphers()` by introducing a number of changes:
* Rather than check for implemented ciphers in a hierarchical manner (as introduced in #326), this PR follows the approach of `cipher_pref_check()`: test a block of ciphers, mark the selected cipher as implemented, and then test the same block of ciphers, minus those that have previously been selected, until a test fails. Thus the number of calls to `$OPENSSL s_client` is just one more than the number of ciphers implemented. (Since some servers cannot handle ClientHello messages offering more than 128 ciphers, the tests are performed on blocks of 128 or fewer ciphers. So, if OpenSSL supports 197 ciphers, the number of calls to `$OPENSSL s_client` is 2 plus the number of ciphers supported by the server.)
* If $using_sockets is true, then OpenSSL is used first to find all supported ciphers that OpenSSL supports (since OpenSSL is faster than `tls_sockets()`), and then `tls_sockets()` is only used to test those cipher suites that were not found to be supported by OpenSSL.
* The `prepare_debug()` function, which reads in `$CIPHERS_BY_STRENGTH_FILE`, determines which ciphers are supported by the version of OpenSSL being used. If a version of OpenSSL older than 1.0 is being used, then this is used to determine which ciphers to test using OpenSSL rather than using `$OPENSSL ciphers -V`.
Following the approach of `cipher_pref_check()` reduces the number of queries to the server. Using OpenSSL before `tls_sockets()` reduces the number of calls to `tls_sockets()` to 3 plus the number of ciphers supported by the server that are not supported by OpenSSL, so the cost penalty over just using OpenSSL is fairly small.
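# A rough standalone sketch of the block-testing loop described above (illustrative only,
# not the actual run_allciphers() code; $host, $port and the awk parsing are assumptions):
#
#      remaining="ALL:COMPLEMENTOFALL"
#      while true; do
#           selected=$($OPENSSL s_client -cipher "$remaining" -connect "$host:$port" </dev/null 2>/dev/null \
#                      | awk '/^New,/ { print $5 }')
#           [[ -z "$selected" || "$selected" == "(NONE)" ]] && break        # handshake failed: done
#           echo "supported: $selected"
#           remaining="$remaining:-$selected"                               # exclude the cipher just negotiated
#      done
#
# i.e. one s_client call per cipher the server supports plus a final failing one, as noted above.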
2016-11-15 20:45:50 +01:00
fi
2016-11-15 21:06:24 +01:00
elif [ [ $OSSL_VER_MAJOR -lt 1 ] ] ; then
2017-05-22 23:04:58 +02:00
[ [ " : ${ ossl_supported_sslv2 } : " = ~ " : ${ TLS_CIPHER_OSSL_NAME [i] } : " ] ] && TLS_CIPHER_OSSL_SUPPORTED[ i] = true
2016-11-15 21:06:24 +01:00
else
2017-10-09 15:13:46 +02:00
[ [ " $ossl_supported_sslv2 " = ~ $hexc ] ] && TLS_CIPHER_OSSL_SUPPORTED[ i] = true
2016-11-15 20:45:50 +01:00
fi
2017-05-22 23:04:58 +02:00
i += 1
2017-04-12 20:34:26 +02:00
done < " $CIPHERS_BY_STRENGTH_FILE "
2016-11-15 15:20:48 +01:00
fi
2017-05-22 23:04:58 +02:00
TLS_NR_CIPHERS = i
2015-09-22 20:09:26 +02:00
}
2015-05-17 22:43:53 +02:00
2015-09-22 20:09:26 +02:00
mybanner( ) {
local idtag
2017-02-10 20:47:49 +01:00
local bb1 bb2 bb3
2015-05-17 22:43:53 +02:00
Use CHILD_MASS_TESTING environment variable
This PR introduces the environment variable `CHILD_MASS_TESTING`, and uses it as an indicator that testssl.sh is running as a child within mass testing rather than using the `$APPEND` flag. It also makes a number of other changes to make the handling of HTML, CSV, JSON, and log files consistent, and it fixes a number of bugs related to the generation of these files when mass testing is being performed.
Please let me know if you disagree with any of the changes in this PR, or if you would prefer that it be broken up into multiple smaller PRs.
Some of the changes are as follows:
- When the `$APPEND` flag is true, all of these files are appended to and headers and footers are omitted. (Perhaps this should be changed. Appending to a log file isn't an issue, but appending to a JSON or HTML file without including headers or footers seems to just create an improperly formatted file).
- Following the code in `prepare_logging()`, an error is printed and the program stops if the `$APPEND` flag is false and one of the files to be written to already exists.
Some of the bugs fixed:
Creating log files did not work with mass testing:
- If `--logfile <logfile>` is used, then the parent and each child try to write to "logfile".
- If `--logging` is used, then a log file is created for each child, but an oddly-named log file is also created for the parent. The one created by the parent contains the entire output.
Plain JSON files:
- When `--jsonfile <jsonfile>` is run, there is no comma separating the final finding for one child and the first finding for the next child.
Pretty JSON files:
- When `--jsonfile-pretty <jsonfile>` is called without mass testing, the "target host" line is empty, since `$NODE` has not yet been set.
- When `--jsonfile <jsonfile>` is run with mass testing, there is no comma separating the final finding for one child and the first finding for the next child. In addition, `fileout_pretty_json_banner()` is never called, and the entries for individual tests have insufficient information to determine what is being tested (it lists "service" and "ip", but not port number).
For the final issue, when mass testing is being performed and all output is being placed in a single file, I have the parent call `fileout_pretty_json_banner()`, but tell `fileout_pretty_json_banner()` to not include a "target host" or "port", but then have each child include a "target host" or "port" (when the "service" and "ip" are being printed).
2017-03-29 17:16:09 +02:00
" $QUIET " && return
" $CHILD_MASS_TESTING " && return
2016-05-26 12:56:55 +02:00
OPENSSL_NR_CIPHERS = $( count_ciphers " $( $OPENSSL ciphers 'ALL:COMPLEMENTOFALL:@STRENGTH' 2>/dev/null) " )
2015-09-22 20:09:26 +02:00
[ [ -z " $GIT_REL " ] ] && \
idtag = " $CVS_REL " || \
idtag = " $GIT_REL -- $CVS_REL_SHORT "
2017-02-07 20:25:41 +01:00
bb1 = $( cat <<EOF
2015-09-22 20:09:26 +02:00
###########################################################
2017-04-05 20:39:35 +02:00
$PROG_NAME $VERSION from
2017-02-07 20:25:41 +01:00
EOF
)
bb2 = $( cat <<EOF
2015-09-22 20:09:26 +02:00
2016-01-23 19:18:33 +01:00
This program is free software. Distribution and
modification under GPLv2 permitted.
2015-09-22 20:09:26 +02:00
USAGE w/o ANY WARRANTY. USE IT AT YOUR OWN RISK!
2017-04-18 23:15:32 +02:00
Please file bugs @
2017-02-10 20:47:49 +01:00
EOF
)
bb3 = $( cat <<EOF
2015-09-22 20:09:26 +02:00
###########################################################
EOF
)
2017-02-10 20:47:49 +01:00
pr_bold " $bb1 "
pr_boldurl " $SWURL " ; outln
2017-04-05 20:39:35 +02:00
if [ [ -n " $idtag " ] ] ; then
#FIXME: if we run it not off the git dir we miss the version tag.
# at least we don't want to display empty brackets here...
pr_bold " ("
pr_grey " $idtag "
prln_bold ")"
fi
2017-02-07 20:25:41 +01:00
pr_bold " $bb2 "
2017-02-10 20:47:49 +01:00
pr_boldurl "https://testssl.sh/bugs/" ; outln
pr_bold " $bb3 "
2015-09-22 20:09:26 +02:00
outln "\n"
2016-05-26 12:56:55 +02:00
outln " Using \" $( $OPENSSL version 2>/dev/null) \" [~ $OPENSSL_NR_CIPHERS ciphers] "
2016-01-15 16:37:47 +01:00
out " on $HNAME : "
2017-02-07 20:25:41 +01:00
outln " $OPENSSL_LOCATION "
2015-09-22 20:09:26 +02:00
outln " (built: \" $OSSL_BUILD_DATE \", platform: \" $OSSL_VER_PLATFORM \")\n "
2015-05-17 22:43:53 +02:00
}
2015-09-22 20:09:26 +02:00
2015-05-17 22:43:53 +02:00
cleanup ( ) {
2017-04-05 22:58:57 +02:00
# If parallel mass testing is being performed, then the child tests need
# to be killed before $TEMPDIR is deleted. Otherwise, error messages
# will be created if testssl.sh is stopped before all testing is complete.
2017-05-16 20:16:35 +02:00
" $INTERACTIVE " && [ [ $NR_PARALLEL_TESTS -gt 0 ] ] && echo -en "\r \r" 1>& 2
2017-04-05 22:58:57 +02:00
while [ [ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ] ] ; do
Improvements to mass testing in parallel
This PR provides improvements to `run_mass_testing_parallel()`. Currently, `run_mass_testing_parallel()` treats `$MAX_PARALLEL` as the maximum difference between the number of the test whose results were last processed and the number of the most recently started test. This means that test #40 will not be started until the results of test #20 have been processed. I've encountered situations in which tests 21 though 39 have completed, but test #20 is still running, and so no new tests are started.
This PR fixes the problem by checking the status of all running child tests to see if any are complete, rather than just looking at `$NEXT_PARALLEL_TEST_TO_FINISH`. This prevents one slow child test (or a few slow child tests) from slowing up the entire mass testing process.
This PR also changes the basis for determining whether a slow child process should be killed. Rather than waiting `$MAX_WAIT_TEST` seconds from the time that the parent started waiting (which is rather arbitrary), it kills the process if `$MAX_WAIT_TEST` seconds have passed since the child test was started. Given this, and that the above change makes it less likely that a slow child test will slow up the overall testing, I increased `$MAX_WAIT_TEST` from 600 seconds to 1200 seconds.
I added some `debugme` statements that provide feedback on the status of testing, but in non-debug mode there may be a perception issue. If one test (e.g., test #20) is very slow, testssl.sh will not display any results from later tests until the slow test finishes, even though testssl.sh will continue running new tests in the background. The user, seeing no output from testssl.sh for an extended period of time, may think that testssl.sh has frozen, even though it is really just holding back on displaying the later results so that the results will be displayed in the order in which the tests were started.
2017-05-10 18:18:59 +02:00
if [ [ ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } -ne 0 ] ] && \
ps ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } >/dev/null ; then
2017-04-05 22:58:57 +02:00
kill ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } >& 2 2>/dev/null
wait ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } 2>/dev/null # make sure pid terminated, see wait(1p)
2017-05-16 20:16:35 +02:00
get_next_message_testing_parallel_result "stopped"
2017-04-05 22:58:57 +02:00
else
# If a test had already completed, but its output wasn't yet processed,
# then process it now.
2017-05-16 20:16:35 +02:00
get_next_message_testing_parallel_result "completed"
2017-04-05 22:58:57 +02:00
fi
NEXT_PARALLEL_TEST_TO_FINISH += 1
done
2015-09-17 15:30:15 +02:00
if [ [ " $DEBUG " -ge 1 ] ] ; then
2017-02-25 16:31:30 +01:00
tmln_out
tm_underline " DEBUG (level $DEBUG ): see files in $TEMPDIR "
tmln_out
2015-09-17 15:30:15 +02:00
else
[ [ -d " $TEMPDIR " ] ] && rm -rf " $TEMPDIR " ;
fi
outln
Handle --file option
Introduced "trick" so that if the `--file` option is used, `html_header()` will only be called once before anything is printed and `html_footer()` will only be called once after all printing is complete. With this, `html_header()` now delete the output file if it exists.
Also introduced `html_reserved()`, which is called for all text to be sent to `out_html()`. `html_reserved()` converts any HTML reserved characters (", ', &, <, >) to their corresponding entity names (&quot;, &apos;, &amp;, &lt;, &gt;).
2017-02-09 23:03:21 +01:00
html_footer
2017-03-27 00:30:42 +02:00
fileout_footer
2017-03-25 19:37:30 +01:00
# debugging off, see above
2017-03-27 08:59:29 +02:00
grep -q xtrace <<< " $SHELLOPTS " && ! " $DEBUG_ALLINONE " && exec 2>& 42 42>& -
2015-05-17 22:43:53 +02:00
}
2017-05-24 23:12:18 +02:00
child_error( ) {
cleanup
exit 1
}
2015-08-24 23:50:03 +02:00
fatal( ) {
2017-04-07 10:26:41 +02:00
outln
2017-02-25 16:31:30 +01:00
prln_magenta " Fatal error: $1 " >& 2
2015-09-17 15:30:15 +02:00
exit $2
2016-07-04 23:05:12 +02:00
# 1: cmd line error
# 2: secondary/other cmd line error
# -1: other user error
# -2: network problem
# -3: s.th. fatal is not supported in the client
# -4: s.th. is not supported yet
# -5: openssl problem
2015-08-24 23:50:03 +02:00
}
2015-05-17 22:43:53 +02:00
# for now only GOST engine
initialize_engine( ) {
2015-09-17 15:30:15 +02:00
grep -q '^# testssl config file' " $OPENSSL_CONF " 2>/dev/null && return 0 # have been here already
2017-09-23 11:34:37 +02:00
if " $NO_ENGINE " ; then
return 1
elif $OPENSSL engine gost -v 2>& 1 | egrep -q 'invalid command|no such engine' ; then
2015-09-17 15:30:15 +02:00
outln
2016-03-05 21:07:49 +01:00
pr_warning " No engine or GOST support via engine with your $OPENSSL " ; outln
2017-08-28 18:25:45 +02:00
fileout_insert_warning "engine_problem" "WARN" " No engine or GOST support via engine with your $OPENSSL "
2015-09-17 15:30:15 +02:00
return 1
2017-09-22 18:48:38 +02:00
elif ! $OPENSSL engine gost -vvvv -t -c 2>/dev/null >/dev/null; then
2015-09-17 15:30:15 +02:00
outln
2016-03-05 21:07:49 +01:00
pr_warning " No engine or GOST support via engine with your $OPENSSL " ; outln
2017-08-28 18:25:45 +02:00
fileout_insert_warning "engine_problem" "WARN" " No engine or GOST support via engine with your $OPENSSL "
2015-09-17 15:30:15 +02:00
return 1
else # we have engine support
if [ [ -n " $OPENSSL_CONF " ] ] ; then
2017-02-25 16:31:30 +01:00
prln_warning "For now I am providing the config file to have GOST support"
2015-09-17 15:30:15 +02:00
else
OPENSSL_CONF = $TEMPDIR /gost.conf || exit -6
# see https://www.mail-archive.com/openssl-users@openssl.org/msg65395.html
cat >$OPENSSL_CONF << EOF
2015-07-13 23:24:23 +02:00
# testssl config file for openssl
2015-05-17 22:43:53 +02:00
2015-07-13 23:24:23 +02:00
openssl_conf = openssl_def
2015-07-02 16:39:41 +02:00
2015-05-17 22:43:53 +02:00
[ openssl_def ]
engines = engine_section
[ engine_section ]
gost = gost_section
[ gost_section ]
engine_id = gost
default_algorithms = ALL
CRYPT_PARAMS = id-Gost28147-89-CryptoPro-A-ParamSet
EOF
2015-09-17 15:30:15 +02:00
export OPENSSL_CONF
fi
fi
return 0
2015-05-17 22:43:53 +02:00
}
2016-12-29 22:02:07 +01:00
# arg1: text to display before "-->"
# arg2: arg needed to accept to continue
2015-05-17 22:43:53 +02:00
ignore_no_or_lame( ) {
2015-09-17 15:30:15 +02:00
local a
2016-07-04 23:05:12 +02:00
[ [ " $WARNINGS " = = off ] ] && return 0
[ [ " $WARNINGS " = = batch ] ] && return 1
2017-02-25 16:31:30 +01:00
tm_warning " $1 --> "
2015-09-17 15:30:15 +02:00
read a
2016-12-29 22:02:07 +01:00
if [ [ " $a " = = " $( tolower " $2 " ) " ] ] ; then
2017-02-24 16:22:59 +01:00
return 0
2016-12-29 22:02:07 +01:00
else
return 1
fi
2015-05-17 22:43:53 +02:00
}
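# Usage example (illustrative arguments): ignore_no_or_lame "The results might look ok, continue?" "yes"
# returns 0 right away if WARNINGS=off, returns 1 without prompting if WARNINGS=batch,
# and otherwise returns 0 only if the user answers with the lowercased acceptance word "yes".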
2015-06-15 12:13:16 +02:00
# arg1: URI
2015-05-17 22:43:53 +02:00
parse_hn_port( ) {
2015-09-17 15:30:15 +02:00
local tmp_port
NODE = " $1 "
2017-11-05 22:41:11 +01:00
NODE = " ${ NODE /https \: \/ \/ / } " # strip "https"
NODE = " ${ NODE %%/* } " # strip trailing urlpath
NODE = " ${ NODE %%. } " # strip trailing "." if supplied
2015-09-17 15:30:15 +02:00
2016-05-20 13:45:53 +02:00
# if there's a trailing ':' probably a starttls/application protocol was specified
2017-03-23 16:36:29 +01:00
if grep -q ':$' <<< " $NODE " ; then
if grep -wq http <<< " $NODE " ; then
2017-02-16 19:10:59 +01:00
fatal "\"http\" is not what you meant probably" 1
else
fatal " \" $1 \" is not a valid URI " 1
fi
2016-05-20 13:45:53 +02:00
fi
2015-09-17 15:30:15 +02:00
# was the address supplied like [AA:BB:CC::]:port ?
2017-02-16 19:10:59 +01:00
if grep -q ']' <<< " $NODE " ; then
2015-09-17 15:30:15 +02:00
tmp_port = $( printf " $NODE " | sed 's/\[.*\]//' | sed 's/://' )
# determine v6 port, supposed it was supplied additionally
if [ [ -n " $tmp_port " ] ] ; then
PORT = $tmp_port
NODE = $( sed " s/: $PORT // " <<< " $NODE " )
fi
NODE = $( sed -e 's/\[//' -e 's/\]//' <<< " $NODE " )
else
# determine v4 port, supposed it was supplied additionally
2017-03-23 16:36:29 +01:00
grep -q ':' <<< " $NODE " && \
PORT = $( sed 's/^.*\://' <<< " $NODE " ) && NODE = $( sed 's/\:.*$//' <<< " $NODE " )
2015-09-17 15:30:15 +02:00
fi
debugme echo $NODE :$PORT
SNI = " -servername $NODE "
2017-03-23 16:36:29 +01:00
URL_PATH = $( sed 's/https:\/\///' <<< " $1 " | sed 's/' " ${ NODE } " '//' | sed 's/.*' " ${ PORT } " '//' ) # remove protocol and node part and port
URL_PATH = $( sed 's/\/\//\//g' <<< " $URL_PATH " ) # we rather want // -> /
2016-06-23 19:42:26 +02:00
[ [ -z " $URL_PATH " ] ] && URL_PATH = "/"
debugme echo $URL_PATH
2017-03-23 16:36:29 +01:00
return 0 # NODE, URL_PATH, PORT is set now
2016-06-23 19:42:26 +02:00
}
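# Worked example for parse_hn_port() above (illustrative URI):
#      parse_hn_port "https://example.com:8443/foo/bar"
# leaves NODE=example.com, PORT=8443, URL_PATH=/foo/bar and SNI="-servername example.com".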
# now do logging if instructed
# arg1: for testing mx records we put the name of the logfile in here, otherwise we'd get strange file names
prepare_logging( ) {
local fname_prefix = " $1 "
2017-03-29 17:16:09 +02:00
local filename_provided = false
[ [ -n " $LOGFILE " ] ] && [ [ ! -d " $LOGFILE " ] ] && filename_provided = true
# Similar to html_header():
! " $do_logging " && return 0
" $do_mass_testing " && ! " $filename_provided " && return 0
" $CHILD_MASS_TESTING " && " $filename_provided " && return 0
2016-06-23 19:42:26 +02:00
2017-11-14 19:41:25 +01:00
[ [ -z " $fname_prefix " ] ] && fname_prefix = " ${ FNAME_PREFIX } . ${ NODE } " _p" ${ PORT } "
2016-06-23 19:42:26 +02:00
2017-03-29 17:16:09 +02:00
if [ [ -z " $LOGFILE " ] ] ; then
2017-04-07 09:49:44 +02:00
LOGFILE = " $fname_prefix - $( date +"%Y%m%d-%H%M" .log) "
2017-03-29 17:16:09 +02:00
elif [ [ -d " $LOGFILE " ] ] ; then
# actually we were instructed to place all files in a DIR instead of the current working dir
2017-04-07 09:49:44 +02:00
LOGFILE = " $LOGFILE / $fname_prefix - $( date +"%Y%m%d-%H%M" .log) "
2017-03-29 17:16:09 +02:00
else
: # just for clarity: a log file was specified, no need to do anything else
fi
2017-03-27 11:29:21 +02:00
2017-03-29 17:16:09 +02:00
if ! " $APPEND " ; then
2017-04-07 10:26:41 +02:00
[ [ -e " $LOGFILE " ] ] && fatal " \" $LOGFILE \" exists. Either use \"--append\" or (re)move it " 1
2015-11-11 11:56:32 +01:00
fi
2017-04-06 17:26:03 +02:00
tmln_out " ## Scan started as: \" $PROG_NAME $CMDLINE \" " >>" $LOGFILE "
tmln_out " ## at $HNAME : $OPENSSL_LOCATION " >>" $LOGFILE "
tmln_out " ## version testssl: $VERSION ${ GIT_REL_SHORT :- $CVS_REL_SHORT } from $REL_DATE " >>" $LOGFILE "
tmln_out " ## version openssl: \" $OSSL_VER \" from \" $OSSL_BUILD_DATE \")\n " >>" $LOGFILE "
2017-05-12 15:51:19 +02:00
exec > >( tee -a -i " $LOGFILE " )
2015-06-16 14:04:44 +02:00
}
2016-10-28 15:30:07 +02:00
2015-07-22 13:11:20 +02:00
# args: string containing ip addresses
2015-08-01 23:11:27 +02:00
filter_ip6_address( ) {
2015-09-17 15:30:15 +02:00
local a
for a in " $@ " ; do
if ! is_ipv6addr " $a " ; then
continue
fi
2016-03-05 21:07:49 +01:00
if " $HAS_SED_E " ; then
2017-03-31 12:24:25 +02:00
sed -E 's/[^abcdefABCDEF0123456789:]//g' <<< " $a " | sed -e '/^$/d' -e '/^;;/d'
2015-09-17 15:30:15 +02:00
else
2017-03-31 12:24:25 +02:00
sed -r 's/[^abcdefABCDEF0123456789:]//g' <<< " $a " | sed -e '/^$/d' -e '/^;;/d'
2015-09-17 15:30:15 +02:00
fi
done
2015-07-22 13:11:20 +02:00
}
2015-07-23 17:11:33 +02:00
2015-08-01 23:11:27 +02:00
filter_ip4_address( ) {
2015-09-17 15:30:15 +02:00
local a
for a in " $@ " ; do
if ! is_ipv4addr " $a " ; then
continue
fi
2016-03-05 21:07:49 +01:00
if " $HAS_SED_E " ; then
2017-03-31 12:24:25 +02:00
sed -E 's/[^[:digit:].]//g' <<< " $a " | sed -e '/^$/d'
2015-09-17 15:30:15 +02:00
else
2017-03-31 12:24:25 +02:00
sed -r 's/[^[:digit:].]//g' <<< " $a " | sed -e '/^$/d'
2015-09-17 15:30:15 +02:00
fi
done
2015-07-22 13:11:20 +02:00
}
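# Illustrative: filter_ip4_address "10.0.0.1" "not-an-ip" "192.168.2.1"
# prints only the two valid IPv4 addresses, one per line; filter_ip6_address works analogously.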
2015-07-13 23:24:23 +02:00
2015-08-12 00:17:28 +02:00
get_local_aaaa( ) {
2015-09-17 15:30:15 +02:00
local ip6 = ""
local etchosts = "/etc/hosts /c/Windows/System32/drivers/etc/hosts"
2016-01-23 19:18:33 +01:00
2015-09-17 15:30:15 +02:00
# for security testing sometimes we have local entries. Getent is BS under Linux for localhost: No network, no resolution
2017-04-04 09:54:47 +02:00
ip6 = $( grep -wih " $1 " $etchosts 2>/dev/null | grep ':' | egrep -v '^#|\.local' | egrep -i " [[:space:]] $1 " | awk '{ print $1 }' )
2015-09-17 15:30:15 +02:00
if is_ipv6addr " $ip6 " ; then
echo " $ip6 "
else
echo ""
fi
2015-08-12 00:17:28 +02:00
}
get_local_a( ) {
2015-09-17 15:30:15 +02:00
local ip4 = ""
local etchosts = "/etc/hosts /c/Windows/System32/drivers/etc/hosts"
2016-01-23 19:18:33 +01:00
2015-09-17 15:30:15 +02:00
# for security testing sometimes we have local entries. Getent is BS under Linux for localhost: No network, no resolution
2017-04-04 09:54:47 +02:00
ip4 = $( grep -wih " $1 " $etchosts 2>/dev/null | egrep -v ':|^#|\.local' | egrep -i " [[:space:]] $1 " | awk '{ print $1 }' )
2015-09-17 15:30:15 +02:00
if is_ipv4addr " $ip4 " ; then
echo " $ip4 "
else
echo ""
fi
2015-08-12 00:17:28 +02:00
}
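# Illustrative: with a line like "127.0.1.1  myhost" in /etc/hosts, get_local_a myhost echoes
# 127.0.1.1 (and the caller later sets LOCAL_A=true); get_local_aaaa does the same for IPv6
# entries containing ":", skipping comments and *.local lines.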
2015-09-21 16:43:47 +02:00
check_resolver_bins( ) {
2017-09-19 00:08:33 +02:00
if ! type -p dig & > /dev/null && ! type -p host & > /dev/null && ! type -p drill & > /dev/null && ! type -p nslookup & >/dev/null; then
2015-11-28 17:33:10 +01:00
fatal "Neither \"dig\", \"host\", \"drill\" or \"nslookup\" is present" "-3"
2015-09-21 16:43:47 +02:00
fi
return 0
}
2015-07-23 17:11:33 +02:00
# arg1: a host name. Returned will be 0-n IPv4 addresses
2017-02-15 19:40:06 +01:00
# watch out: $1 can also be a cname! --> all checked
2015-07-23 17:11:33 +02:00
get_a_record( ) {
2015-09-17 15:30:15 +02:00
local ip4 = ""
local saved_openssl_conf = " $OPENSSL_CONF "
2016-10-28 21:37:10 +02:00
" $NODNS " && return 0 # if no DNS lookup was instructed, leave here
2015-09-17 15:30:15 +02:00
OPENSSL_CONF = "" # see https://github.com/drwetter/testssl.sh/issues/134
2017-06-01 18:08:13 +02:00
check_resolver_bins
2015-11-01 02:01:52 +01:00
if [ [ " $NODE " = = *.local ] ] ; then
2017-09-19 00:08:33 +02:00
if type -p avahi-resolve & >/dev/null; then
2015-11-05 22:54:29 +01:00
ip4 = $( filter_ip4_address $( avahi-resolve -4 -n " $1 " 2>/dev/null | awk '{ print $2 }' ) )
2017-09-19 00:08:33 +02:00
elif type -p dig & >/dev/null; then
2015-11-06 02:04:04 +01:00
ip4 = $( filter_ip4_address $( dig @224.0.0.251 -p 5353 +short -t a +notcp " $1 " 2>/dev/null | sed '/^;;/d' ) )
2015-11-01 02:01:52 +01:00
else
2016-07-04 23:05:12 +02:00
fatal "Local hostname given but no 'avahi-resolve' or 'dig' avaliable." -3
2015-11-01 02:01:52 +01:00
fi
2015-11-03 10:30:59 +01:00
fi
2015-09-17 15:30:15 +02:00
if [ [ -z " $ip4 " ] ] ; then
2017-09-19 00:08:33 +02:00
if type -p dig & > /dev/null ; then
2017-02-15 19:40:06 +01:00
ip4 = $( filter_ip4_address $( dig +short -t a " $1 " 2>/dev/null | awk '/^[0-9]/' ) )
2016-09-21 21:59:50 +02:00
fi
2015-09-17 15:30:15 +02:00
fi
if [ [ -z " $ip4 " ] ] ; then
2017-09-19 00:08:33 +02:00
type -p host & > /dev/null && \
2017-02-15 19:40:06 +01:00
ip4 = $( filter_ip4_address $( host -t a " $1 " 2>/dev/null | awk '/address/ { print $NF }' ) )
2015-09-17 15:30:15 +02:00
fi
2015-11-23 14:54:41 +01:00
if [ [ -z " $ip4 " ] ] ; then
2017-09-19 00:08:33 +02:00
type -p drill & > /dev/null && \
2017-02-15 19:40:06 +01:00
ip4 = $( filter_ip4_address $( drill a " $1 " | awk '/ANSWER SECTION/,/AUTHORITY SECTION/ { print $NF }' | awk '/^[0-9]/' ) )
2015-11-23 14:54:41 +01:00
fi
2015-09-17 15:30:15 +02:00
if [ [ -z " $ip4 " ] ] ; then
2017-09-19 00:08:33 +02:00
if type -p nslookup & >/dev/null; then
2017-02-15 19:40:06 +01:00
ip4 = $( filter_ip4_address $( nslookup -querytype= a " $1 " 2>/dev/null | awk '/^Name/ { getline; print $NF }' ) )
2015-09-17 15:30:15 +02:00
fi
fi
OPENSSL_CONF = " $saved_openssl_conf " # see https://github.com/drwetter/testssl.sh/issues/134
echo " $ip4 "
2015-07-23 17:11:33 +02:00
}
# arg1: a host name. Returned will be 0-n IPv6 addresses
2017-02-15 19:40:06 +01:00
# watch out: $1 can also be a cname! --> all checked
2015-07-23 17:11:33 +02:00
get_aaaa_record( ) {
2015-09-17 15:30:15 +02:00
local ip6 = ""
local saved_openssl_conf = " $OPENSSL_CONF "
2016-10-28 21:37:10 +02:00
" $NODNS " && return 0 # if no DNS lookup was instructed, leave here
2015-09-17 15:30:15 +02:00
OPENSSL_CONF = "" # see https://github.com/drwetter/testssl.sh/issues/134
2017-06-01 18:08:13 +02:00
check_resolver_bins
2015-09-17 15:30:15 +02:00
if [ [ -z " $ip6 " ] ] ; then
2015-11-01 02:01:52 +01:00
if [ [ " $NODE " = = *.local ] ] ; then
2017-09-19 00:08:33 +02:00
if type -p avahi-resolve & >/dev/null; then
2017-02-15 19:40:06 +01:00
ip6 = $( filter_ip6_address $( avahi-resolve -6 -n " $1 " 2>/dev/null | awk '{ print $2 }' ) )
2017-09-19 00:08:33 +02:00
elif type -p dig & >/dev/null; then
2015-11-06 02:04:04 +01:00
ip6 = $( filter_ip6_address $( dig @ff02::fb -p 5353 -t aaaa +short +notcp " $NODE " ) )
2015-11-01 02:01:52 +01:00
else
2016-07-04 23:05:12 +02:00
fatal "Local hostname given but no 'avahi-resolve' or 'dig' avaliable." -3
2015-11-01 02:01:52 +01:00
fi
2017-09-19 00:08:33 +02:00
elif type -p host & > /dev/null ; then
2017-02-15 19:40:06 +01:00
ip6 = $( filter_ip6_address $( host -t aaaa " $1 " | awk '/address/ { print $NF }' ) )
2017-09-19 00:08:33 +02:00
elif type -p dig & > /dev/null; then
2017-02-15 19:40:06 +01:00
ip6 = $( filter_ip6_address $( dig +short -t aaaa " $1 " 2>/dev/null | awk '/^[0-9]/' ) )
2017-09-19 00:08:33 +02:00
elif type -p drill & > /dev/null; then
2017-02-15 19:40:06 +01:00
ip6 = $( filter_ip6_address $( drill aaaa " $1 " | awk '/ANSWER SECTION/,/AUTHORITY SECTION/ { print $NF }' | awk '/^[0-9]/' ) )
2017-09-19 00:08:33 +02:00
elif type -p nslookup & >/dev/null; then
2017-02-15 19:40:06 +01:00
ip6 = $( filter_ip6_address $( nslookup -type= aaaa " $1 " 2>/dev/null | awk '/' " ^ ${ a } " '.*AAAA/ { print $NF }' ) )
2015-09-17 15:30:15 +02:00
fi
fi
OPENSSL_CONF = " $saved_openssl_conf " # see https://github.com/drwetter/testssl.sh/issues/134
echo " $ip6 "
2015-07-23 17:11:33 +02:00
}
2017-01-17 13:57:14 +01:00
# RFC6844: DNS Certification Authority Authorization (CAA) Resource Record
# arg1: domain to check for
get_caa_rr_record( ) {
2017-02-14 16:18:27 +01:00
local raw_caa = ""
2017-02-11 14:01:51 +01:00
local caa_flag
local -i len_caa_property
local caa_property_name
local caa_property_value
2017-01-17 13:57:14 +01:00
local saved_openssl_conf = " $OPENSSL_CONF "
2017-10-18 18:43:54 +02:00
local all_caa = ""
2017-01-17 13:57:14 +01:00
2017-06-01 18:08:13 +02:00
" $NODNS " && return 0 # if no DNS lookup was instructed, leave here
2017-02-11 14:01:51 +01:00
# if there's a type257 record there are two output formats here, mostly depending on age of distribution
# roughly that's the difference between text and binary format
2017-02-14 16:18:27 +01:00
# 1) 'google.com has CAA record 0 issue "symantec.com"'
2017-02-11 14:01:51 +01:00
# 2) 'google.com has TYPE257 record \# 19 0005697373756573796D616E7465632E636F6D'
# for dig +short the output always starts with '0 issue [..]' or '\# 19 [..]' so we normalize thereto to keep caa_flag, caa_property
# caa_property then has key/value pairs, see https://tools.ietf.org/html/rfc6844#section-3
2017-01-17 13:57:14 +01:00
OPENSSL_CONF = ""
2017-06-01 18:08:13 +02:00
check_resolver_bins
2017-09-19 00:08:33 +02:00
if type -p dig & > /dev/null; then
2017-02-11 14:01:51 +01:00
raw_caa = " $( dig $1 type257 +short) "
2017-01-17 13:57:14 +01:00
# empty if no CAA record
2017-09-19 00:08:33 +02:00
elif type -p drill & > /dev/null; then
2017-02-15 19:40:06 +01:00
raw_caa = " $( drill $1 type257 | awk '/' " ^ ${ 1 } " '.*CAA/ { print $5,$6,$7 }' ) "
2017-09-19 00:08:33 +02:00
elif type -p host & > /dev/null; then
2017-02-11 14:01:51 +01:00
raw_caa = " $( host -t type257 $1 ) "
if egrep -wvq "has no CAA|has no TYPE257" <<< " $raw_caa " ; then
raw_caa = " $( sed -e 's/^.*has CAA record //' -e 's/^.*has TYPE257 record //' <<< " $raw_caa " ) "
2017-01-17 13:57:14 +01:00
fi
2017-09-19 00:08:33 +02:00
elif type -p nslookup & > /dev/null; then
2017-02-11 14:01:51 +01:00
raw_caa = " $( nslookup -type= type257 $1 | grep -w rdata_257) "
if [ [ -n " $raw_caa " ] ] ; then
2017-02-14 16:18:27 +01:00
raw_caa = " $( sed 's/^.*rdata_257 = //' <<< " $raw_caa " ) "
2017-01-17 13:57:14 +01:00
fi
else
return 1
2017-02-14 16:18:27 +01:00
# No dig, drill, host, or nslookup --> complaint was elsewhere already
2017-01-17 13:57:14 +01:00
fi
OPENSSL_CONF = " $saved_openssl_conf " # see https://github.com/drwetter/testssl.sh/issues/134
2017-02-14 16:18:27 +01:00
debugme echo $raw_caa
2017-02-11 14:01:51 +01:00
2017-10-18 18:43:54 +02:00
if [ [ " $raw_caa " = ~ \# \ [ 0-9] [ 0-9] ] ] ; then
# for posteo we get this binary format returned e.g. for old dig versions:
# \# 19 0005697373756567656F74727573742E636F6D
# \# 23 0009697373756577696C6467656F74727573742E636F6D
# \# 34 0005696F6465666D61696C746F3A686F73746D617374657240706F73 74656F2E6465
# # len caaflag <more_see_below> @ p o s t e o . d e
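#      worked example (illustrative) for the google.com record above:
#      "\# 19 0005697373756573796D616E7465632E636F6D" --> flag byte 00, name length 05,
#      hex 6973737565 = "issue", remainder 73796D616E7465632E636F6D = "symantec.com",
#      i.e. the same CAA record as the text form  0 issue "symantec.com"  shown above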
while read hash len line ; do
if [ [ " ${ line : 0 : 2 } " = = "00" ] ] ; then # probably the caa flag, always 00, so we don't keep this
len_caa_property = $( printf "%0d" " $(( 10# ${ line : 2 : 2 } )) " ) # get len and do type casting, for posteo we have 05 or 09 here as a string
len_caa_property = $(( len_caa_property*2)) # =>word! Now get name from 4th and value from 4th+len position...
line = " ${ line / / } " # especially with iodefs there's a blank in the string which we just skip
caa_property_name = " $( hex2ascii ${ line : 4 : $len_caa_property } ) "
caa_property_value = " $( hex2ascii " ${ line : $(( 4 + len_caa_property)) : 100 } " ) "
# echo "${caa_property_name}=${caa_property_value}"
all_caa += " ${ caa_property_name } = ${ caa_property_value } \n "
else
outln " please report unknown CAA RR $line with flag @ $NODE "
return 7
fi
done <<< " $raw_caa "
2017-10-20 19:58:20 +02:00
sort <<< " $( safe_echo " $all_caa " ) "
2017-10-18 18:43:54 +02:00
return 0
2017-10-20 19:58:20 +02:00
elif grep -q '"' <<< " $raw_caa " ; then
2017-10-18 18:43:54 +02:00
raw_caa = ${ raw_caa // \" / } # strip all ". Now we should have flag, name, value
#caa_flag="$(awk '{ print $1 }' <<< "$raw_caa")"
#caa_property_name="$(awk '{ print $2 }' <<< "$raw_caa")"
#caa_property_value="$(awk '{ print $3 }' <<< "$raw_caa")"
2017-10-20 19:58:20 +02:00
safe_echo " $( sort <<< " $( awk '{ print $2"="$3 }' <<< " $raw_caa " ) " ) "
2017-10-18 18:43:54 +02:00
return 0
2017-01-21 19:43:07 +01:00
else
2017-02-11 14:01:51 +01:00
# no caa record
return 1
2017-01-21 19:43:07 +01:00
fi
2017-02-11 14:01:51 +01:00
2017-01-17 13:57:14 +01:00
# to do:
# 4: check whether $1 is a CNAME and take this
2017-01-21 19:43:07 +01:00
return 0
2017-01-17 13:57:14 +01:00
}
2017-02-15 19:40:06 +01:00
# watch out: $1 can also be a cname! --> all checked
2017-01-17 13:57:14 +01:00
get_mx_record() {
     local mxs=""
     local saved_openssl_conf="$OPENSSL_CONF"
     OPENSSL_CONF=""                          # see https://github.com/drwetter/testssl.sh/issues/134
     check_resolver_bins
2017-06-01 18:08:13 +02:00
     # we need the last two columns here
2017-09-19 00:08:33 +02:00
     if type -p host &> /dev/null; then
2017-02-24 16:22:59 +01:00
          mxs="$(host -t MX "$1" 2>/dev/null | awk '/is handled by/ { print $(NF-1), $NF }')"
2017-09-19 00:08:33 +02:00
     elif type -p dig &> /dev/null; then
2017-02-24 16:22:59 +01:00
          mxs="$(dig +short -t MX "$1" 2>/dev/null | awk '/^[0-9]/')"
2017-09-19 00:08:33 +02:00
     elif type -p drill &> /dev/null; then
2017-02-24 16:22:59 +01:00
          mxs="$(drill mx $1 | awk '/IN[ \t]MX[ \t]+/ { print $(NF-1), $NF }')"
2017-09-19 00:08:33 +02:00
     elif type -p nslookup &> /dev/null; then
2017-02-24 16:22:59 +01:00
          mxs="$(nslookup -type=MX "$1" 2>/dev/null | awk '/mail exchanger/ { print $(NF-1), $NF }')"
2017-01-17 13:57:14 +01:00
     else
          fatal "No dig, host, drill or nslookup" -3
     fi
     OPENSSL_CONF="$saved_openssl_conf"
     echo "$mxs"
}
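
# Illustration (not executed): get_mx_record() prints "priority hostname" pairs, one per line,
# e.g. for a hypothetical domain:
#      10 mx1.example.com.
#      20 mx2.example.com.
# run_mx_all_ips() further below sorts by priority and strips it:
#      get_mx_record example.com | sort -n | sed -e 's/^.* //' -e 's/\.$//' | tr '\n' ' '
# which would then yield "mx1.example.com mx2.example.com ".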
2015-07-23 17:11:33 +02:00
2017-01-17 13:57:14 +01:00
# set IPADDRs and IP46ADDRs
#
2015-06-16 14:04:44 +02:00
determine_ip_addresses( ) {
2015-09-17 15:30:15 +02:00
local ip4 = ""
local ip6 = ""
2017-06-01 18:08:13 +02:00
ip4 = $( get_a_record $NODE )
ip6 = $( get_aaaa_record $NODE )
IP46ADDRs = $( newline_to_spaces " $ip4 $ip6 " )
2017-01-17 13:57:14 +01:00
if [ [ -n " $CMDLINE_IP " ] ] ; then
2017-06-01 18:08:13 +02:00
# command line has supplied an IP address or "one"
if [ [ " $CMDLINE_IP " = = "one" ] ] ; then
2017-01-17 13:57:14 +01:00
# use first IPv4 address
2017-06-01 18:08:13 +02:00
CMDLINE_IP = " $( head -1 <<< " $ip4 " ) "
[ [ -z " $CMDLINE_IP " ] ] && CMDLINE_IP = " $( head -1 <<< " $ip6 " ) "
fi
2017-01-17 13:57:14 +01:00
NODEIP = " $CMDLINE_IP "
if is_ipv4addr " $NODEIP " ; then
ip4 = " $NODEIP "
elif is_ipv6addr " $NODEIP " ; then
ip6 = " $NODEIP "
else
fatal "couldn't identify supplied \"CMDLINE_IP\"" 2
fi
elif is_ipv4addr " $NODE " ; then
2015-09-17 15:30:15 +02:00
ip4 = " $NODE " # only an IPv4 address was supplied as an argument, no hostname
SNI = "" # override Server Name Indication as we test the IP only
else
ip4 = $( get_local_a $NODE ) # is there a local host entry?
if [ [ -z $ip4 ] ] ; then # empty: no (LOCAL_A is predefined as false)
ip4 = $( get_a_record $NODE )
else
2015-09-26 22:44:33 +02:00
LOCAL_A = true # we have the ip4 from local host entry and need to signal this to testssl
2015-09-17 15:30:15 +02:00
fi
2016-01-23 19:18:33 +01:00
# same now for ipv6
2015-09-17 15:30:15 +02:00
ip6 = $( get_local_aaaa $NODE )
2015-09-26 22:44:33 +02:00
if [ [ -z $ip6 ] ] ; then
2015-09-17 15:30:15 +02:00
ip6 = $( get_aaaa_record $NODE )
2015-09-26 22:44:33 +02:00
else
LOCAL_AAAA = true # we have a local ipv6 entry and need to signal this to testssl
fi
fi
2017-01-17 13:57:14 +01:00
2017-06-01 18:08:13 +02:00
if [ [ -z " $ip4 " ] ] ; then # IPv6 only address
2016-03-05 21:07:49 +01:00
if " $HAS_IPv6 " ; then
2015-09-26 22:44:33 +02:00
IPADDRs = $( newline_to_spaces " $ip6 " )
IP46ADDRs = " $IPADDRs " # IP46ADDRs are the ones to display, IPADDRs the ones to test
fi
else
2016-03-05 21:07:49 +01:00
if " $HAS_IPv6 " && [ [ -n " $ip6 " ] ] ; then
2015-09-26 22:44:33 +02:00
IPADDRs = $( newline_to_spaces " $ip4 $ip6 " )
else
IPADDRs = $( newline_to_spaces " $ip4 " )
fi
2015-09-17 15:30:15 +02:00
fi
2017-01-17 13:57:14 +01:00
if [ [ -z " $IPADDRs " ] ] ; then
2017-06-01 18:08:13 +02:00
if [ [ -n " $ip6 " ] ] ; then
fatal " Only IPv6 address(es) for \" $NODE \" available, maybe add \"-6\" to $0 " -1
else
fatal " No IPv4/IPv6 address(es) for \" $NODE \" available " -1
fi
2015-09-17 15:30:15 +02:00
fi
return 0 # IPADDR and IP46ADDR is set now
2015-06-16 14:04:44 +02:00
}
determine_rdns() {
2015-09-17 15:30:15 +02:00
     local saved_openssl_conf="$OPENSSL_CONF"
2016-06-02 09:59:52 +02:00
     local nodeip="$(tr -d '[]' <<< $NODEIP)"      # for DNS we do not need the square brackets of IPv6 addresses
2015-09-17 15:30:15 +02:00
2016-10-28 21:37:10 +02:00
     "$NODNS" && rDNS="--" && return 0
     OPENSSL_CONF=""                               # see https://github.com/drwetter/testssl.sh/issues/134
2017-06-01 18:08:13 +02:00
     check_resolver_bins
2015-11-07 02:16:21 +01:00
     if [[ "$NODE" == *.local ]]; then
2017-09-19 00:08:33 +02:00
          if type -p avahi-resolve &>/dev/null; then
2016-05-27 19:54:23 +02:00
               rDNS=$(avahi-resolve -a $nodeip 2>/dev/null | awk '{ print $2 }')
2017-09-19 00:08:33 +02:00
          elif type -p dig &>/dev/null; then
2016-05-27 19:54:23 +02:00
               rDNS=$(dig -x $nodeip @224.0.0.251 -p 5353 +notcp +noall +answer | awk '/PTR/ { print $NF }')
2015-11-05 22:54:29 +01:00
          fi
2017-09-19 00:08:33 +02:00
     elif type -p dig &> /dev/null; then
2016-05-27 19:54:23 +02:00
          rDNS=$(dig -x $nodeip +noall +answer | awk '/PTR/ { print $NF }')     # +short returns also CNAME, e.g. openssl.org
2017-09-19 00:08:33 +02:00
     elif type -p host &> /dev/null; then
2016-05-27 19:54:23 +02:00
          rDNS=$(host -t PTR $nodeip 2>/dev/null | awk '/pointer/ { print $NF }')
2017-09-19 00:08:33 +02:00
     elif type -p drill &> /dev/null; then
2017-02-15 19:40:06 +01:00
          rDNS=$(drill -x ptr $nodeip 2>/dev/null | awk '/ANSWER SECTION/ { getline; print $NF }')
2017-09-19 00:08:33 +02:00
     elif type -p nslookup &> /dev/null; then
2016-05-27 19:54:23 +02:00
          rDNS=$(nslookup -type=PTR $nodeip 2>/dev/null | grep -v 'canonical name =' | grep 'name = ' | awk '{ print $NF }' | sed 's/\.$//')
2015-09-17 15:30:15 +02:00
     fi
     OPENSSL_CONF="$saved_openssl_conf"            # see https://github.com/drwetter/testssl.sh/issues/134
2015-10-06 12:30:29 +02:00
     rDNS="$(echo $rDNS)"
2016-06-02 09:59:52 +02:00
     [[ -z "$rDNS" ]] && rDNS="--"
2015-09-17 15:30:15 +02:00
     return 0
2015-06-16 14:04:44 +02:00
}
2015-05-17 22:43:53 +02:00
2015-07-23 17:11:33 +02:00
# We need to get the IP address of the proxy so we can use it in fd_socket
2015-11-03 10:30:59 +01:00
#
check_proxy() {
2015-09-17 15:30:15 +02:00
     if [[ -n "$PROXY" ]]; then
2016-09-21 21:42:45 +02:00
          if ! "$HAS_PROXY"; then
               fatal "Your $OPENSSL is too old to support the \"-proxy\" option" -5
2015-09-17 15:30:15 +02:00
          fi
2017-02-15 19:40:06 +01:00
          if [[ "$PROXY" == "auto" ]]; then
2017-04-07 09:49:44 +02:00
               # get $ENV (https_proxy is the one we care about)
               PROXY="${https_proxy#*\/\/}"
               [[ -z "$PROXY" ]] && PROXY="${http_proxy#*\/\/}"
2017-02-15 19:40:06 +01:00
               [[ -z "$PROXY" ]] && fatal "you specified \"--proxy=auto\" but \"\$http(s)_proxy\" is empty" 2
          fi
2017-04-07 09:49:44 +02:00
          # strip off http/https part if supplied:
          PROXY="${PROXY/http\:\/\//}"
          PROXY="${PROXY/https\:\/\//}"
          PROXYNODE="${PROXY%:*}"
          PROXYPORT="${PROXY#*:}"
2017-02-15 19:40:06 +01:00
          is_number "$PROXYPORT" || fatal "Proxy port cannot be determined from \"$PROXY\"" 2
2015-09-17 15:30:15 +02:00
2015-09-18 15:12:01 +02:00
          #if is_ipv4addr "$PROXYNODE" || is_ipv6addr "$PROXYNODE" ; then
          # IPv6 via openssl -proxy: that doesn't work. Sockets does
2017-02-15 19:40:06 +01:00
          #FIXME: finish this with LibreSSL which supports an IPv6 proxy
2015-09-18 15:12:01 +02:00
          if is_ipv4addr "$PROXYNODE"; then
               PROXYIP="$PROXYNODE"
          else
2017-04-07 09:49:44 +02:00
               PROXYIP="$(get_a_record "$PROXYNODE" 2>/dev/null | grep -v alias | sed 's/^.*address //')"
2016-07-04 23:05:12 +02:00
               [[ -z "$PROXYIP" ]] && fatal "Proxy IP cannot be determined from \"$PROXYNODE\"" "2"
2015-09-18 15:12:01 +02:00
          fi
2015-09-17 15:30:15 +02:00
          PROXY="-proxy $PROXYIP:$PROXYPORT"
     fi
2015-07-23 17:11:33 +02:00
}
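
# Worked example of the parameter expansions above (values are made up):
#      https_proxy="http://proxy.example.com:3128"
#      PROXY="${https_proxy#*\/\/}"       # -> "proxy.example.com:3128"
#      PROXYNODE="${PROXY%:*}"            # -> "proxy.example.com"
#      PROXYPORT="${PROXY#*:}"            # -> "3128"
# Unless $PROXYNODE already is an IPv4 address, PROXYIP is then resolved via get_a_record().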
2015-06-16 14:04:44 +02:00
2015-11-03 13:13:10 +01:00
# this is only being called from determine_optimal_proto in order to check whether we have a server
# with client authentication and/or a server that has SSL session IDs switched off
#
sclient_auth() {
     [[ $1 -eq 0 ]] && return 0                                            # no client auth (CLIENT_AUTH=false is preset globally)
     if [[ -n $(awk '/Master-Key: / { print $2 }' "$2") ]]; then           # connect succeeded
          if grep -q '^<<< .*CertificateRequest' "$2"; then                # CertificateRequest message in -msg
2016-01-23 19:18:33 +01:00
               CLIENT_AUTH=true
2015-11-03 13:13:10 +01:00
               return 0
          fi
2016-01-23 19:18:33 +01:00
          if [[ -z $(awk '/Session-ID: / { print $2 }' "$2") ]]; then      # probably no SSL session
2015-11-03 13:13:10 +01:00
               if [[ 2 -eq $(grep -c CERTIFICATE "$2") ]]; then            # do another sanity check to be sure
                    CLIENT_AUTH=false
2016-04-21 18:04:33 +02:00
                    NO_SSL_SESSIONID=true                                  # NO_SSL_SESSIONID is preset globally to false for all other cases
2015-11-03 13:13:10 +01:00
                    return 0
               fi
          fi
     fi
     # what's left now is: master key empty, handshake returned not successful, session ID empty --> not successful
     return 1
}
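
# Rough sketch of the s_client output (file "$2") sclient_auth() keys on -- the exact wording
# depends on the openssl version used:
#      <<< TLS 1.2 Handshake [length 000d], CertificateRequest     <- only present with client authentication (-msg)
#      Master-Key: 9A3F...                                         <- non-empty after a successful handshake
#      Session-ID: B04D...                                         <- may be empty if session IDs are switched off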
2016-01-23 19:18:33 +01:00
# this function determines OPTIMAL_PROTO. It is a workaround function: under certain circumstances
2015-11-03 13:13:10 +01:00
# (e.g. IIS 6.0 with openssl 1.0.2 as opposed to 1.0.1) s_client -connect needs a protocol, otherwise it will fail!
# Circumstances observed so far: 1.) IIS 6 2.) starttls + dovecot imap
2017-03-31 12:24:25 +02:00
# The first try in the loop is empty as we prefer not to specify always a protocol if we can get along w/o it
2015-09-14 11:03:10 +02:00
#
determine_optimal_proto( ) {
2017-04-18 23:15:32 +02:00
local all_failed = true
local tmp = ""
2015-09-17 15:30:15 +02:00
2015-10-11 23:07:16 +02:00
>$ERRFILE
2015-09-17 15:30:15 +02:00
if [ [ -n " $1 " ] ] ; then
2017-04-18 23:15:32 +02:00
# starttls workaround needed see https://github.com/drwetter/testssl.sh/issues/188 -- kind of odd
2015-09-17 15:30:15 +02:00
for STARTTLS_OPTIMAL_PROTO in -tls1_2 -tls1 -ssl3 -tls1_1 -ssl2; do
2017-09-19 18:37:03 +02:00
               $OPENSSL s_client $(s_client_options "$STARTTLS_OPTIMAL_PROTO $BUGS -connect "$NODEIP:$PORT" $PROXY -msg -starttls $1") </dev/null >$TMPFILE 2>>$ERRFILE
2015-11-03 13:13:10 +01:00
if sclient_auth $? $TMPFILE ; then
2017-04-18 23:15:32 +02:00
all_failed = false
2015-10-11 23:07:16 +02:00
break
fi
2017-04-18 23:15:32 +02:00
all_failed = true
2015-09-17 15:30:15 +02:00
done
2017-04-18 23:15:32 +02:00
" $all_failed " && STARTTLS_OPTIMAL_PROTO = ""
2015-09-17 15:30:15 +02:00
debugme echo " STARTTLS_OPTIMAL_PROTO: $STARTTLS_OPTIMAL_PROTO "
else
2016-07-26 17:10:20 +02:00
for OPTIMAL_PROTO in '' -tls1_2 -tls1 -ssl3 -tls1_1 -ssl2; do
2017-09-19 18:37:03 +02:00
               $OPENSSL s_client $(s_client_options "$OPTIMAL_PROTO $BUGS -connect "$NODEIP:$PORT" -msg $PROXY $SNI") </dev/null >$TMPFILE 2>>$ERRFILE
2015-11-03 13:13:10 +01:00
if sclient_auth $? $TMPFILE ; then
2017-04-18 23:15:32 +02:00
# we use the successful handshake at least to get one valid protocol supported -- it saves us time later
                    if [[ -z "$OPTIMAL_PROTO" ]]; then
                         # convert to openssl terminology
                         tmp=$(get_protocol $TMPFILE)
                         tmp=${tmp/\./_}
                         tmp=${tmp/v/}
                         tmp="$(tolower $tmp)"
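                         # e.g. a handshake reported as "TLSv1.2" becomes "TLSv1_2" -> "TLS1_2" -> "tls1_2",
                         # i.e. the token format add_tls_offered() below expects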
2017-10-02 13:48:55 +02:00
add_tls_offered " ${ tmp } " yes
2017-04-18 23:15:32 +02:00
else
2017-10-02 13:48:55 +02:00
add_tls_offered " ${ OPTIMAL_PROTO /-/ } " yes
2017-04-18 23:15:32 +02:00
fi
debugme echo " one proto determined: $tmp "
all_failed = false
2015-10-11 23:07:16 +02:00
break
fi
2017-04-18 23:15:32 +02:00
all_failed = true
2015-09-17 15:30:15 +02:00
done
2017-04-18 23:15:32 +02:00
" $all_failed " && OPTIMAL_PROTO = ""
2015-09-17 15:30:15 +02:00
debugme echo " OPTIMAL_PROTO: $OPTIMAL_PROTO "
2016-07-26 17:13:45 +02:00
if [ [ " $OPTIMAL_PROTO " = = "-ssl2" ] ] ; then
2017-02-25 16:31:30 +01:00
prln_magenta " $NODEIP : $PORT appears to only support SSLv2. "
2016-12-29 22:02:07 +01:00
ignore_no_or_lame " Type \"yes\" to proceed and accept false negatives or positives" "yes"
2016-07-26 17:13:45 +02:00
[ [ $? -ne 0 ] ] && exit -2
fi
2015-09-17 15:30:15 +02:00
fi
2015-09-22 20:09:26 +02:00
grep -q '^Server Temp Key' $TMPFILE && HAS_DH_BITS = true # FIX #190
2015-09-17 15:30:15 +02:00
2017-04-18 23:15:32 +02:00
if " $all_failed " ; then
2015-09-17 15:30:15 +02:00
outln
2016-03-05 21:07:49 +01:00
if " $HAS_IPv6 " ; then
2015-10-05 09:56:21 +02:00
pr_bold " Your $OPENSSL is not IPv6 aware, or $NODEIP : $PORT "
else
pr_bold " $NODEIP : $PORT "
fi
2015-10-11 23:07:16 +02:00
tmpfile_handle $FUNCNAME .txt
2017-02-25 16:31:30 +01:00
prln_bold "doesn't seem to be a TLS/SSL enabled server" ;
2016-12-29 22:02:07 +01:00
ignore_no_or_lame " The results might look ok but they could be nonsense. Really proceed ? (\"yes\" to continue)" "yes"
2015-09-17 15:30:15 +02:00
[ [ $? -ne 0 ] ] && exit -2
fi
2015-10-11 23:07:16 +02:00
tmpfile_handle $FUNCNAME .txt
return 0
2015-09-14 11:03:10 +02:00
}
2017-06-29 23:57:32 +02:00
# arg1: ftp, smtp, pop3, imap, xmpp, telnet, ldap, postgres, mysql (maybe with trailing 's')
2015-06-16 14:04:44 +02:00
determine_service( ) {
2016-01-23 19:18:33 +01:00
local ua
2015-09-17 15:30:15 +02:00
local protocol
2015-10-11 23:07:16 +02:00
if ! fd_socket; then # check if we can connect to $NODEIP:$PORT
2015-09-19 15:03:40 +02:00
[ [ -n " $PROXY " ] ] && \
fatal " You're sure $PROXYNODE : $PROXYPORT allows tunneling here? Can't connect to \" $NODEIP : $PORT \" " -2 || \
fatal " Can't connect to \" $NODEIP : $PORT \"\nMake sure a firewall is not between you and your scanning target! " -2
2015-09-17 15:30:15 +02:00
fi
close_socket
2015-10-15 14:15:07 +02:00
datebanner " Start"
2015-09-17 15:30:15 +02:00
outln
2017-05-15 19:47:13 +02:00
if [ [ -z " $1 " ] ] ; then # no STARTTLS.
2015-10-11 23:07:16 +02:00
determine_optimal_proto " $1 "
2015-09-17 15:30:15 +02:00
$SNEAKY && \
ua = " $UA_SNEAKY " || \
ua = " $UA_STD "
GET_REQ11 = " GET $URL_PATH HTTP/1.1\r\nHost: $NODE \r\nUser-Agent: $ua \r\nConnection: Close\r\nAccept: text/*\r\n\r\n "
2017-03-31 12:24:25 +02:00
# HEAD_REQ11="HEAD $URL_PATH HTTP/1.1\r\nHost: $NODE\r\nUser-Agent: $ua\r\nAccept: text/*\r\n\r\n"
# GET_REQ10="GET $URL_PATH HTTP/1.0\r\nUser-Agent: $ua\r\nConnection: Close\r\nAccept: text/*\r\n\r\n"
# HEAD_REQ10="HEAD $URL_PATH HTTP/1.0\r\nUser-Agent: $ua\r\nAccept: text/*\r\n\r\n"
2016-10-11 22:30:30 +02:00
service_detection $OPTIMAL_PROTO
2017-05-15 19:47:13 +02:00
else # STARTTLS
2016-12-08 19:54:44 +01:00
if [ [ " $1 " = = postgres ] ] ; then
protocol = "postgres"
else
2017-05-15 19:47:13 +02:00
protocol = ${ 1 %s } # strip trailing 's' in ftp(s), smtp(s), pop3(s), etc
2016-12-08 19:54:44 +01:00
fi
2015-09-17 15:30:15 +02:00
case " $protocol " in
2017-06-29 23:57:32 +02:00
ftp| smtp| pop3| imap| xmpp| telnet| ldap| postgres| mysql)
2015-09-17 15:30:15 +02:00
STARTTLS = " -starttls $protocol "
SNI = ""
2016-07-04 23:05:12 +02:00
if [ [ " $protocol " = = xmpp ] ] ; then
2015-09-17 15:30:15 +02:00
# for XMPP, openssl has a problem using -connect $NODEIP:$PORT. thus we use -connect $NODE:$PORT instead!
NODEIP = " $NODE "
if [ [ -n " $XMPP_HOST " ] ] ; then
2016-09-21 21:42:45 +02:00
if ! " $HAS_XMPP " ; then
2016-07-04 23:05:12 +02:00
fatal " Your $OPENSSL does not support the \"-xmpphost\" option " -5
2015-09-17 15:30:15 +02:00
fi
2017-05-15 19:47:13 +02:00
STARTTLS = " $STARTTLS -xmpphost $XMPP_HOST " # small hack -- instead of changing calls all over the place
2015-09-17 15:30:15 +02:00
# see http://xmpp.org/rfcs/rfc3920.html
2017-09-18 23:25:07 +02:00
else
if is_ipv4addr " $NODE " ; then
# XMPP needs a jabber domainname
if [ [ -n " $rDNS " ] ] ; then
prln_warning " IP address doesn't work for XMPP, trying PTR record $rDNS "
# remove trailing .
NODE = ${ rDNS %%. }
NODEIP = ${ rDNS %%. }
else
fatal "No DNS supplied and no PTR record available which I can try for XMPP" -1
fi
fi
2015-09-17 15:30:15 +02:00
fi
2017-09-18 23:25:07 +02:00
elif [ [ " $protocol " = = postgres ] ] ; then
2016-12-08 19:54:44 +01:00
# Check if openssl version supports postgres.
if ! " $HAS_POSTGRES " ; then
fatal " Your $OPENSSL does not support the \"-starttls postgres\" option " -5
fi
2017-09-18 23:25:07 +02:00
elif [ [ " $protocol " = = mysql ] ] ; then
2017-06-29 23:57:32 +02:00
# Check if openssl version supports mysql.
if ! " $HAS_MYSQL " ; then
fatal " Your $OPENSSL does not support the \"-starttls mysql\" option " -5
fi
fi
2017-09-19 18:37:03 +02:00
                    $OPENSSL s_client $(s_client_options "-connect $NODEIP:$PORT $PROXY $BUGS $STARTTLS") 2>$ERRFILE >$TMPFILE </dev/null
2015-09-17 15:30:15 +02:00
if [ [ $? -ne 0 ] ] ; then
2017-05-15 19:47:13 +02:00
debugme cat $TMPFILE | head -25
2015-09-17 15:30:15 +02:00
outln
fatal " $OPENSSL couldn't establish STARTTLS via $protocol to $NODEIP : $PORT " -2
fi
2015-09-22 20:09:26 +02:00
grep -q '^Server Temp Key' $TMPFILE && HAS_DH_BITS = true # FIX #190
2015-10-05 09:56:21 +02:00
out " Service set: $CORRECT_SPACES STARTTLS via "
2017-02-10 20:47:49 +01:00
out " $( toupper " $protocol " ) "
2017-07-01 10:11:34 +02:00
[ [ " $protocol " = = mysql ] ] && out " -- attention, this is experimental"
fileout "service" "INFO" " $protocol "
2017-02-10 20:47:49 +01:00
[ [ -n " $XMPP_HOST " ] ] && out " (XMPP domain=\' $XMPP_HOST \') "
2015-09-17 15:30:15 +02:00
outln
; ;
*) outln
2017-06-29 23:57:32 +02:00
               fatal "currently only ftp, smtp, pop3, imap, xmpp, telnet, ldap, postgres, and mysql are allowed" -4
2015-09-17 15:30:15 +02:00
; ;
esac
fi
2015-10-15 14:15:07 +02:00
tmpfile_handle $FUNCNAME .txt
2015-09-17 15:30:15 +02:00
return 0 # OPTIMAL_PROTO, GET_REQ*/HEAD_REQ* is set now
2015-05-17 22:43:53 +02:00
}
display_rdns_etc( ) {
2017-02-21 19:36:23 +01:00
local ip further_ip_addrs = ""
2016-06-02 09:59:52 +02:00
local nodeip = " $( tr -d '[]' <<< $NODEIP ) " # for displaying IPv6 addresses we don't need []
2015-09-18 15:12:01 +02:00
if [ [ -n " $PROXY " ] ] ; then
2015-10-05 09:56:21 +02:00
out " Via Proxy: $CORRECT_SPACES "
2015-09-18 15:12:01 +02:00
outln " $PROXYIP : $PROXYPORT "
fi
2015-10-05 09:56:21 +02:00
if [ [ $( count_words " $IP46ADDRs " ) -gt 1 ] ] ; then
2017-02-21 19:36:23 +01:00
out " further IP addresses: $CORRECT_SPACES "
2015-10-05 09:56:21 +02:00
for ip in $IP46ADDRs ; do
if [ [ " $ip " = = " $NODEIP " ] ] || [ [ " [ $ip ] " = = " $NODEIP " ] ] ; then
continue
else
2017-02-21 19:36:23 +01:00
further_ip_addrs += " $ip "
2015-10-05 09:56:21 +02:00
fi
2015-05-17 22:43:53 +02:00
done
2017-03-28 19:54:54 +02:00
outln " $( out_row_aligned_max_width " $further_ip_addrs " " $CORRECT_SPACES " $TERM_WIDTH ) "
2015-09-17 15:30:15 +02:00
fi
if " $LOCAL_A " ; then
2016-06-07 09:08:48 +02:00
outln " A record via $CORRECT_SPACES /etc/hosts "
2015-12-08 13:31:52 +01:00
elif [ [ -n " $CMDLINE_IP " ] ] ; then
2016-06-07 09:08:48 +02:00
outln " A record via $CORRECT_SPACES supplied IP \" $CMDLINE_IP \" "
2015-09-17 15:30:15 +02:00
fi
2015-10-05 09:56:21 +02:00
if [ [ -n " $rDNS " ] ] ; then
2017-02-21 22:59:33 +01:00
out " $( printf " %-23s %s" " rDNS ( $nodeip ): " ) "
2017-03-28 19:54:54 +02:00
out " $( out_row_aligned_max_width " $rDNS " " $CORRECT_SPACES " $TERM_WIDTH ) "
2015-10-05 09:56:21 +02:00
fi
2015-05-17 22:43:53 +02:00
}
datebanner( ) {
2017-03-25 19:37:30 +01:00
local scan_time_f = ""
if [ [ " $1 " = ~ Done ] ] ; then
2017-03-26 19:34:02 +02:00
scan_time_f = " $( printf "%04ss" " $SCAN_TIME " ) " # 4 digits because of windows
2017-03-25 19:37:30 +01:00
pr_reverse " $1 $( date +%F) $( date +%T) [ $scan_time_f ] -->> $NODEIP : $PORT ( $NODE ) <<-- "
else
pr_reverse " $1 $( date +%F) $( date +%T) -->> $NODEIP : $PORT ( $NODE ) <<-- "
fi
2015-10-15 14:15:07 +02:00
outln "\n"
[ [ " $1 " = ~ Start ] ] && display_rdns_etc
2015-05-17 22:43:53 +02:00
}
2015-08-28 00:15:51 +02:00
# one line with char $1 over screen width $2
2015-10-05 09:56:21 +02:00
draw_line() {
2017-02-09 23:03:21 +01:00
     out "$(printf -- "$1"'%.s' $(eval "echo {1.."$(($2))"}"))"
2015-05-31 14:40:12 +02:00
}
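
# Illustrative call (not executed here): 'draw_line "-" 10' runs
#      printf -- '-%.s' 1 2 3 4 5 6 7 8 9 10
# i.e. the format string "-%.s" is applied once per brace-expanded argument; '%.s' consumes an
# argument without printing anything, so the result is a line of 10 dashes.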
2015-05-17 22:43:53 +02:00
2015-07-23 17:11:33 +02:00
2016-06-23 19:42:26 +02:00
run_mx_all_ips( ) {
2015-09-17 15:30:15 +02:00
local mxs mx
2016-01-23 19:18:33 +01:00
local mxport
2015-09-17 15:30:15 +02:00
local -i ret = 0
STARTTLS_PROTOCOL = "smtp"
# test first higher priority servers
mxs = $( get_mx_record " $1 " | sort -n | sed -e 's/^.* //' -e 's/\.$//' | tr '\n' ' ' )
mxport = ${ 2 :- 25 }
2016-06-23 19:42:26 +02:00
if [ [ -n " $LOGFILE " ] ] ; then
prepare_logging
else
2017-11-14 19:41:25 +01:00
prepare_logging " ${ FNAME_PREFIX } .mx- $1 "
2016-06-23 19:42:26 +02:00
fi
2015-09-17 15:30:15 +02:00
if [ [ -n " $mxs " ] ] && [ [ " $mxs " != ' ' ] ] ; then
[ [ $mxport = = "465" ] ] && \
STARTTLS_PROTOCOL = "" # no starttls for Port 465, on all other ports we speak starttls
pr_bold " Testing now all MX records (on port $mxport ): " ; outln " $mxs "
for mx in $mxs ; do
2016-06-23 19:42:26 +02:00
draw_line "-" $(( TERM_WIDTH * 2 / 3 ))
2015-09-17 15:30:15 +02:00
outln
2016-01-23 19:18:33 +01:00
parse_hn_port " $mx : $mxport "
2015-09-17 15:30:15 +02:00
determine_ip_addresses || continue
if [ [ $( count_words " $( echo -n " $IPADDRs " ) " ) -gt 1 ] ] ; then # we have more than one ipv4 address to check
pr_bold " Testing all IPv4 addresses (port $PORT ): " ; outln " $IPADDRs "
for ip in $IPADDRs ; do
NODEIP = " $ip "
lets_roll " ${ STARTTLS_PROTOCOL } "
done
else
NODEIP = " $IPADDRs "
lets_roll " ${ STARTTLS_PROTOCOL } "
fi
ret = $(( $? + ret))
done
2016-06-23 19:42:26 +02:00
draw_line "-" $(( TERM_WIDTH * 2 / 3 ))
2015-09-17 15:30:15 +02:00
outln
pr_bold " Done testing now all MX records (on port $mxport ): " ; outln " $mxs "
else
2017-02-25 16:31:30 +01:00
          prln_bold " $1 has no MX record(s)"
2015-09-17 15:30:15 +02:00
fi
return $ret
2015-05-17 22:43:53 +02:00
}
2017-04-11 23:05:27 +02:00
# If run_mass_testing() is being used, then create the command line
# for the test based on the global command line (all elements of the
# command line provided to the parent, except the --file option) and the
# specific command line options for the test to be run. Each argument
# in the command line needs to be a separate element in an array in order
# to deal with word splitting within file names (see #702).
#
# If run_mass_testing_parallel() is being used, then in addition to the above,
2017-04-12 21:50:55 +02:00
# modify global command line for child tests so that if all (JSON, CSV, HTML)
2017-04-11 23:05:27 +02:00
# output is to go into a single file, each child will have its output placed in
# a separate, named file, so that the separate files can be concatenated
# together once they are complete to create the single file.
#
# If run_mass_testing() is being used, then "$1" is "serial". If
# run_mass_testing_parallel() is being used, then "$1" is "parallel XXXXXXXX"
# where XXXXXXXX is the number of the test being run.
create_mass_testing_cmdline( ) {
local testing_type = " $1 "
local cmd test_number
local -i nr_cmds = 0
local skip_next = false
MASS_TESTING_CMDLINE = ( )
[ [ " $testing_type " = ~ parallel ] ] && read testing_type test_number <<< " $testing_type "
2017-04-12 21:50:55 +02:00
2017-05-19 17:00:30 +02:00
# Start by adding the elements from the global command line to the command line for the
# test. If run_mass_testing_parallel(), then modify the command line so that, when
# required, each child process sends its test # results to a separate file. If a cmd
# uses '=' for supplying a value we just skip next parameter (we don't use 'parse_opt_equal_sign' here)
debugme echo " ${ CMDLINE_ARRAY [@] } "
2017-04-11 23:05:27 +02:00
for cmd in " ${ CMDLINE_ARRAY [@] } " ; do
" $skip_next " && skip_next = false && continue
if [ [ " $cmd " = = "--file" * ] ] ; then
# Don't include the "--file[=...] argument in the child's command
# line, but do include "--warnings=batch".
MASS_TESTING_CMDLINE[ nr_cmds] = "--warnings=batch"
nr_cmds += 1
2017-05-19 17:00:30 +02:00
# next is the file itself, as no '=' was supplied
[ [ " $cmd " = = '--file' ] ] && skip_next = true
2017-04-11 23:05:27 +02:00
elif [ [ " $testing_type " = = "serial" ] ] ; then
2017-05-23 20:52:25 +02:00
if " $JSONHEADER " && [ [ " $cmd " = = "--jsonfile-pretty" * ] ] ; then
>" $TEMPDIR /jsonfile_child.json "
MASS_TESTING_CMDLINE[ nr_cmds] = " --jsonfile-pretty= $TEMPDIR /jsonfile_child.json "
# next is the jsonfile itself, as no '=' was supplied
[ [ " $cmd " = = --jsonfile-pretty ] ] && skip_next = true
elif " $JSONHEADER " && [ [ " $cmd " = = "--jsonfile" * ] ] ; then
>" $TEMPDIR /jsonfile_child.json "
MASS_TESTING_CMDLINE[ nr_cmds] = " --jsonfile= $TEMPDIR /jsonfile_child.json "
# next is the jsonfile itself, as no '=' was supplied
[ [ " $cmd " = = --jsonfile ] ] && skip_next = true
else
MASS_TESTING_CMDLINE[ nr_cmds] = " $cmd "
fi
2017-04-11 23:05:27 +02:00
nr_cmds += 1
else
case " $cmd " in
2017-10-20 16:32:57 +02:00
               --jsonfile|--jsonfile=*|-oj|-oj=*)
2017-04-11 23:05:27 +02:00
# If <jsonfile> is a file, then have provide a different
# file name to each child process. If <jsonfile> is a
# directory, then just pass it on to the child processes.
if " $JSONHEADER " ; then
MASS_TESTING_CMDLINE[ nr_cmds] = " --jsonfile= $TEMPDIR /jsonfile_ ${ test_number } .json "
2017-05-19 17:00:30 +02:00
# next is the jsonfile itself, as no '=' was supplied
2017-04-11 23:05:27 +02:00
[ [ " $cmd " = = --jsonfile ] ] && skip_next = true
else
MASS_TESTING_CMDLINE[ nr_cmds] = " $cmd "
fi
; ;
2017-10-20 16:32:57 +02:00
               --jsonfile-pretty|--jsonfile-pretty=*|-oJ|-oJ=*)
2017-04-11 23:05:27 +02:00
if " $JSONHEADER " ; then
MASS_TESTING_CMDLINE[ nr_cmds] = " --jsonfile-pretty= $TEMPDIR /jsonfile_ ${ test_number } .json "
[ [ " $cmd " = = --jsonfile-pretty ] ] && skip_next = true
else
MASS_TESTING_CMDLINE[ nr_cmds] = " $cmd "
fi
; ;
2017-10-20 16:32:57 +02:00
               --csvfile|--csvfile=*|-oC|-oC=*)
2017-04-11 23:05:27 +02:00
if " $CSVHEADER " ; then
MASS_TESTING_CMDLINE[ nr_cmds] = " --csvfile= $TEMPDIR /csvfile_ ${ test_number } .csv "
[ [ " $cmd " = = --csvfile ] ] && skip_next = true
else
MASS_TESTING_CMDLINE[ nr_cmds] = " $cmd "
fi
; ;
2017-10-20 16:32:57 +02:00
               --htmlfile|--htmlfile=*|-oH|-oH=*)
2017-04-11 23:05:27 +02:00
if " $HTMLHEADER " ; then
MASS_TESTING_CMDLINE[ nr_cmds] = " --htmlfile= $TEMPDIR /htmlfile_ ${ test_number } .html "
[ [ " $cmd " = = --htmlfile ] ] && skip_next = true
else
MASS_TESTING_CMDLINE[ nr_cmds] = " $cmd "
fi
; ;
*)
MASS_TESTING_CMDLINE[ nr_cmds] = " $cmd "
; ;
esac
nr_cmds += 1
fi
done
2017-05-19 17:00:30 +02:00
# Now add the command line arguments for the specific test to the command line.
# Skip the first argument sent to this function, since it specifies the type of testing being performed.
2017-04-11 23:05:27 +02:00
shift
while [ [ $# -gt 0 ] ] ; do
MASS_TESTING_CMDLINE[ nr_cmds] = " $1 "
nr_cmds += 1
shift
done
2017-05-19 17:00:30 +02:00
return 0
2017-04-11 23:05:27 +02:00
}
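
# Example of the rewriting above for a parallel child test (test number and file names are made up):
# a parent command line of
#      testssl.sh --file hosts.txt --jsonfile results.json
# combined with a child-specific line "example.com:443" becomes
#      --warnings=batch --jsonfile=$TEMPDIR/jsonfile_00000003.json example.com:443
# so that every child writes into its own JSON file, which the parent concatenates afterwards.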
2017-06-12 22:56:36 +02:00
2017-06-13 15:19:28 +02:00
ports2starttls() {
     local tcp_port=$1
2017-06-13 18:42:07 +02:00
     local ret=0
2017-06-13 15:19:28 +02:00
2017-06-13 18:42:07 +02:00
     # https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers
2017-06-13 15:19:28 +02:00
     case $tcp_port in
2017-06-13 18:42:07 +02:00
          21)                 echo "-t ftp " ;;
          23)                 echo "-t telnet " ;;
          119|433)            echo "-t nntp " ;;     # to come
          25|587)             echo "-t smtp " ;;
          110)                echo "-t pop3 " ;;
          143)                echo "-t imap " ;;
          389)                echo "-t ldap " ;;
2017-06-29 23:57:32 +02:00
          3306)               echo "-t mysql " ;;
2017-06-13 18:42:07 +02:00
          5222)               echo "-t xmpp " ;;     # domain of jabber server maybe needed
2017-06-13 15:19:28 +02:00
          5432)               echo "-t postgres" ;;
2017-06-13 18:42:07 +02:00
          563)                ;;                     # NNTPS
          636)                ;;                     # LDAP
          1443|8443|443|981)  ;;                     # HTTPS
          465)                ;;                     # HTTPS | SMTP
          631)                ;;                     # CUPS
          853)                ;;                     # DNS over TLS
          995|993)            ;;                     # POP3|IMAP
          3389)               ;;                     # RDP
          *) ret=1            ;;                     # we don't know this port, so we'd rather not scan it
2017-06-13 15:19:28 +02:00
     esac
2017-06-13 18:42:07 +02:00
     return $ret
2017-06-13 15:19:28 +02:00
}
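
# Usage sketch: 'ports2starttls 25' echoes "-t smtp " and returns 0, 'ports2starttls 443' echoes
# nothing (plain TLS, no STARTTLS needed) and returns 0, and an unlisted port such as 6666 returns 1
# so that nmap_to_plain_file() below skips it.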
2017-06-12 17:09:52 +02:00
nmap_to_plain_file( ) {
local target_fname = ""
local oneline = ""
2017-06-13 18:42:07 +02:00
local ip hosttxt round_brackets ports_specs starttls
local tmp port host_spec protocol dontcare dontcare1
2017-06-12 22:56:36 +02:00
#FIXME: IPv6 is missing here
2017-06-12 17:09:52 +02:00
2017-06-12 22:56:36 +02:00
# Ok, since we are here we are sure to have an nmap file. To avoid questions we make sure it's the right format too
2017-06-12 18:23:55 +02:00
if [ [ " $( head -1 " $FNAME " ) " = ~ ( -oG ) ( .*) ] ] ; then
2017-06-12 22:56:36 +02:00
# yes, greppable
2017-06-12 18:23:55 +02:00
if [ [ $( grep -c Status " $FNAME " ) -ge 1 ] ] ; then
2017-06-14 09:24:20 +02:00
[ [ $( grep -c '\/open\/' " $FNAME " ) -eq 0 ] ] && \
2017-06-12 18:23:55 +02:00
fatal " Nmap file $FNAME should contain at least one open port " -1
else
fatal "strange, nmap grepable misses \"Status\"" -1
fi
else
fatal " Nmap file $FNAME is not in grep(p)able format (-oG filename.gmap) " -1
fi
2017-06-12 22:56:36 +02:00
# strip extension and create output file *.txt in same folder
target_fname = " ${ FNAME %.* } .txt "
> " ${ target_fname } "
if [ [ $? -ne 0 ] ] ; then
# try to just create ${FNAME%.*}.txt in the same dir as the gmap file failed.
# backup is using one in $TEMPDIR
target_fname = " ${ target_fname ##* \/ } " # strip path (Unix)
target_fname = " ${ target_fname ##* \\ } " # strip path (Dos)
target_fname = " $TEMPDIR / $target_fname "
> " ${ target_fname } " || fatal " Cannot create \" ${ target_fname } \" " -1
fi
2017-06-13 18:42:07 +02:00
# Line x: "Host: AAA.BBB.CCC.DDD (<FQDN>) Status: Up"
2017-06-12 22:56:36 +02:00
# Line x+1: "Host: AAA.BBB.CCC.DDD (<FQDN>) Ports: 443/open/tcp//https///"
2017-06-13 18:42:07 +02:00
# (or): Host: AAA.BBB.CCC.DDD (<FQDN>) Ports: 22/open/tcp//ssh//<banner>/, 25/open/tcp//smtp//<banner>/, 443/open/tcp//ssl|http//<banner>
while read -r hosttxt ip round_brackets tmp ports_specs; do
2017-10-09 15:13:46 +02:00
          [[ "$ports_specs" =~ "Status: " ]] && continue           # we don't need this
          [[ "$ports_specs" =~ '/open/tcp/' ]] || continue         # no open tcp at all for this IP --> move on
2017-06-14 09:24:20 +02:00
          host_spec="$ip"
2017-06-12 22:56:36 +02:00
          fqdn="${round_brackets/\(/}"
          fqdn="${fqdn/\)/}"
          if [[ -n "$fqdn" ]]; then
               tmp="$(get_a_record "$fqdn")"
               debugme echo "$tmp \?= $ip"
               if [[ "$tmp" == "$ip" ]]; then
2017-06-13 18:42:07 +02:00
                    host_spec="$fqdn"
2017-06-12 22:56:36 +02:00
               fi
2017-06-13 18:42:07 +02:00
          fi
2017-06-14 09:24:20 +02:00
          while read -r oneline; do
2017-06-13 18:42:07 +02:00
               # 25/open/tcp//smtp//<banner>/,
2017-10-09 15:13:46 +02:00
               [[ "$oneline" =~ '/open/tcp/' ]] || continue        # no open tcp for this port on this IP --> move on
2017-06-13 18:42:07 +02:00
               IFS=/ read -r port dontcare protocol dontcare1 <<< "$oneline"
               starttls="$(ports2starttls $port)"
               [[ $? -eq 1 ]] && continue                          # nmap got a port but we don't know how to speak to it
               [[ "$DEBUG" -ge 1 ]] && echo "${starttls}$host_spec:$port"
               echo "${starttls}${host_spec}:${port}" >>"$target_fname"
          done < <(tr ',' '\n' <<< "$ports_specs")
2017-06-12 17:09:52 +02:00
done < " $FNAME "
2017-06-13 18:42:07 +02:00
[ [ " $DEBUG " -ge 1 ] ] && echo
2017-06-12 22:56:36 +02:00
2017-06-12 17:09:52 +02:00
[ [ -s " $target_fname " ] ] || \
fatal " Couldn't find any open port in $FNAME " -3
export FNAME = $target_fname
}
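
# Worked example (hypothetical host): a greppable nmap line like
#      Host: 192.0.2.10 (mail.example.com)  Ports: 25/open/tcp//smtp//<banner>/
# ends up in the generated plain file as "-t smtp mail.example.com:25", provided mail.example.com
# resolves back to 192.0.2.10 (otherwise the raw IP is used as host spec). Ports for which
# ports2starttls() returns 1 are skipped.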
2017-03-31 12:24:25 +02:00
run_mass_testing( ) {
2015-12-08 13:31:52 +01:00
local cmdline = ""
2017-03-31 12:24:25 +02:00
local first = true
2017-06-12 17:09:52 +02:00
local gmapadd = ""
2017-06-12 18:23:55 +02:00
local saved_fname = " $FNAME "
2015-12-08 13:31:52 +01:00
2017-03-31 12:24:25 +02:00
if [ [ ! -r " $FNAME " ] ] && " $IKNOW_FNAME " ; then
2016-07-04 23:05:12 +02:00
fatal " Can't read file \" $FNAME \" " "2"
2015-12-08 13:31:52 +01:00
fi
2017-06-12 18:23:55 +02:00
2017-06-12 17:09:52 +02:00
if [ [ " $( head -1 " $FNAME " ) " = ~ ( Nmap [ 4-8] ) ( .*) ( scan initiated ) ( .*) ] ] ; then
2017-06-12 18:23:55 +02:00
gmapadd = "grep(p)able nmap "
nmap_to_plain_file
2017-06-12 17:09:52 +02:00
fi
2017-03-31 12:24:25 +02:00
2017-06-12 18:23:55 +02:00
pr_reverse " ====== Running in file batch mode with ${ gmapadd } file=\" $saved_fname \" ====== " ; outln "\n"
2015-12-08 13:31:52 +01:00
while read cmdline; do
2017-04-07 09:49:44 +02:00
cmdline = " $( filter_input " $cmdline " ) "
2015-12-08 13:31:52 +01:00
[ [ -z " $cmdline " ] ] && continue
[ [ " $cmdline " = = "EOF" ] ] && break
2017-04-11 23:05:27 +02:00
# Create the command line for the child in the form of an array (see #702)
create_mass_testing_cmdline "serial" $cmdline
2016-06-23 19:42:26 +02:00
draw_line "=" $(( TERM_WIDTH / 2 )) ; outln;
2017-04-12 16:00:40 +02:00
outln " $( create_cmd_line_string " $0 " " ${ MASS_TESTING_CMDLINE [@] } " ) "
2017-04-12 18:15:27 +02:00
# we call ourselves here. $do_mass_testing is the parent, $CHILD_MASS_TESTING... you figured
2017-09-19 00:08:33 +02:00
if [ [ -z " $( type -p " $0 " ) " ] ] ; then
2017-04-12 18:15:27 +02:00
CHILD_MASS_TESTING = true " $RUN_DIR / $PROG_NAME " " ${ MASS_TESTING_CMDLINE [@] } "
else
CHILD_MASS_TESTING = true " $0 " " ${ MASS_TESTING_CMDLINE [@] } "
fi
2017-05-23 20:52:25 +02:00
if " $JSONHEADER " && [ [ -s " $TEMPDIR /jsonfile_child.json " ] ] ; then
# Need to ensure that a separator is only added if the test
# produced some JSON output.
" $first " || fileout_separator # this is needed for appended output, see #687
first = false
cat " $TEMPDIR /jsonfile_child.json " >> " $JSONFILE "
fi
2017-03-31 12:24:25 +02:00
done < " ${ FNAME } "
2016-01-31 21:02:18 +01:00
return $?
2015-12-08 13:31:52 +01:00
}
2017-05-16 20:16:35 +02:00
# This function is called when it has been determined that the next child
# process has completed or it has been stopped. If the child process completed,
# then this process prints the child process's output to the terminal and, if
# appropriate, adds any JSON, CSV, and HTML output it has created to the
# appropriate file. If the child process was stopped, then a message indicating
# that is printed, but the incomplete results are not used.
2017-04-05 22:58:57 +02:00
get_next_message_testing_parallel_result( ) {
draw_line "=" $(( TERM_WIDTH / 2 )) ; outln;
outln " ${ PARALLEL_TESTING_CMDLINE [NEXT_PARALLEL_TEST_TO_FINISH] } "
2017-05-16 20:16:35 +02:00
if [ [ " $1 " = = "completed" ] ] ; then
cat " $TEMPDIR /term_output_ $( printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH ) .log "
2017-05-23 20:52:25 +02:00
if " $JSONHEADER " && [ [ -s " $TEMPDIR /jsonfile_ $( printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH ) .json " ] ] ; then
# Need to ensure that a separator is only added if the test
# produced some JSON output.
" $FIRST_JSON_OUTPUT " || fileout_separator # this is needed for appended output, see #687
FIRST_JSON_OUTPUT = false
cat " $TEMPDIR /jsonfile_ $( printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH ) .json " >> " $JSONFILE "
2017-05-22 22:57:15 +02:00
fi
2017-05-16 20:16:35 +02:00
" $CSVHEADER " && cat " $TEMPDIR /csvfile_ $( printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH ) .csv " >> " $CSVFILE "
" $HTMLHEADER " && cat " $TEMPDIR /htmlfile_ $( printf "%08d" $NEXT_PARALLEL_TEST_TO_FINISH ) .html " >> " $HTMLFILE "
elif [ [ " $1 " = = "stopped" ] ] ; then
outln "\nTest was stopped before it completed.\n"
else
outln "\nTest timed out before it completed.\n"
fi
2017-04-05 22:58:57 +02:00
}
2015-12-08 13:31:52 +01:00
2017-03-31 12:24:25 +02:00
#FIXME: not called/tested yet
run_mass_testing_parallel( ) {
2015-09-28 22:54:00 +02:00
local cmdline = ""
2017-05-10 18:18:59 +02:00
local -i i nr_active_tests = 0
local -a -i start_time = ( )
2017-05-16 20:16:35 +02:00
local -i curr_time wait_time
2017-06-12 18:23:55 +02:00
local gmapadd = ""
local saved_fname = " $FNAME "
2015-09-28 22:54:00 +02:00
2017-03-31 12:24:25 +02:00
if [ [ ! -r " $FNAME " ] ] && $IKNOW_FNAME ; then
2016-07-04 23:05:12 +02:00
fatal " Can't read file \" $FNAME \" " "2"
2015-09-28 22:54:00 +02:00
fi
2017-04-07 09:49:44 +02:00
2017-06-12 18:23:55 +02:00
if [ [ " $( head -1 " $FNAME " ) " = ~ ( Nmap [ 4-8] ) ( .*) ( scan initiated ) ( .*) ] ] ; then
gmapadd = "grep(p)able nmap "
nmap_to_plain_file
fi
pr_reverse " ====== Running in file batch mode with ${ gmapadd } file=\" $saved_fname \" ====== " ; outln "\n"
2015-09-28 22:54:00 +02:00
while read cmdline; do
2017-04-07 09:49:44 +02:00
cmdline = " $( filter_input " $cmdline " ) "
2015-09-28 22:54:00 +02:00
[ [ -z " $cmdline " ] ] && continue
[ [ " $cmdline " = = "EOF" ] ] && break
2017-04-11 23:05:27 +02:00
# Create the command line for the child in the form of an array (see #702)
create_mass_testing_cmdline " parallel $( printf "%08d" $NR_PARALLEL_TESTS ) " $cmdline
2017-04-05 22:58:57 +02:00
# fileout() won't include the "service" information in the JSON file for the child process
# if the JSON file doesn't already exist.
2017-04-12 21:50:55 +02:00
" $JSONHEADER " && >" $TEMPDIR /jsonfile_ $( printf "%08d" $NR_PARALLEL_TESTS ) .json "
2017-04-12 16:00:40 +02:00
PARALLEL_TESTING_CMDLINE[ NR_PARALLEL_TESTS] = " $( create_cmd_line_string " $0 " " ${ MASS_TESTING_CMDLINE [@] } " ) "
2017-09-19 00:08:33 +02:00
if [ [ -z " $( type -p " $0 " ) " ] ] ; then
2017-05-16 20:16:35 +02:00
CHILD_MASS_TESTING = true " $RUN_DIR / $PROG_NAME " " ${ MASS_TESTING_CMDLINE [@] } " > " $TEMPDIR /term_output_ $( printf "%08d" $NR_PARALLEL_TESTS ) .log " 2>& 1 &
2017-04-12 18:15:27 +02:00
else
2017-05-16 20:16:35 +02:00
CHILD_MASS_TESTING = true " $0 " " ${ MASS_TESTING_CMDLINE [@] } " > " $TEMPDIR /term_output_ $( printf "%08d" $NR_PARALLEL_TESTS ) .log " 2>& 1 &
2017-04-12 18:15:27 +02:00
fi
2017-04-05 22:58:57 +02:00
PARALLEL_TESTING_PID[ NR_PARALLEL_TESTS] = $!
2017-05-10 18:18:59 +02:00
start_time[ NR_PARALLEL_TESTS] = $( date +%s)
2017-05-16 20:16:35 +02:00
if " $INTERACTIVE " ; then
echo -en "\r \r" 1>& 2
echo -n " Started test # $NR_PARALLEL_TESTS " 1>& 2
[ [ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ] ] && \
echo -n " (waiting for test # $NEXT_PARALLEL_TEST_TO_FINISH to finish) " 1>& 2
fi
2017-04-05 22:58:57 +02:00
NR_PARALLEL_TESTS += 1
2017-05-10 18:18:59 +02:00
nr_active_tests += 1
2017-03-31 12:24:25 +02:00
sleep $PARALLEL_SLEEP
2017-04-05 22:58:57 +02:00
# Get the results of any completed tests
while [ [ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ] ] ; do
2017-05-10 18:18:59 +02:00
if [ [ ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } -eq 0 ] ] ; then
2017-05-16 20:16:35 +02:00
" $INTERACTIVE " && echo -en "\r \r" 1>& 2
get_next_message_testing_parallel_result "completed"
2017-04-05 22:58:57 +02:00
NEXT_PARALLEL_TEST_TO_FINISH += 1
2017-05-10 18:18:59 +02:00
elif ! ps ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } >/dev/null ; then
2017-05-16 20:16:35 +02:00
" $INTERACTIVE " && echo -en "\r \r" 1>& 2
get_next_message_testing_parallel_result "completed"
2017-05-10 18:18:59 +02:00
NEXT_PARALLEL_TEST_TO_FINISH += 1
nr_active_tests = $nr_active_tests -1
2017-04-05 22:58:57 +02:00
else
break
fi
done
2017-05-10 18:18:59 +02:00
if [ [ $nr_active_tests -ge $MAX_PARALLEL ] ] ; then
curr_time = $( date +%s)
while true; do
# Check to see if any test completed
for ( ( i = NEXT_PARALLEL_TEST_TO_FINISH; i < NR_PARALLEL_TESTS; i++ ) ) ; do
if [ [ ${ PARALLEL_TESTING_PID [i] } -ne 0 ] ] && \
! ps ${ PARALLEL_TESTING_PID [i] } >/dev/null ; then
PARALLEL_TESTING_PID[ i] = 0
nr_active_tests = $nr_active_tests -1
break
fi
done
[ [ $nr_active_tests -lt $MAX_PARALLEL ] ] && break
                    if [[ $curr_time-${start_time[NEXT_PARALLEL_TEST_TO_FINISH]} -ge $MAX_WAIT_TEST ]]; then
# No test completed in the allocated time, so the first one to
# start will be killed.
kill ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } >& 2 2>/dev/null
wait ${ PARALLEL_TESTING_PID [NEXT_PARALLEL_TEST_TO_FINISH] } 2>/dev/null # make sure pid terminated, see wait(1p)
2017-05-16 20:16:35 +02:00
" $INTERACTIVE " && echo -en "\r \r" 1>& 2
get_next_message_testing_parallel_result "timeout"
2017-05-10 18:18:59 +02:00
NEXT_PARALLEL_TEST_TO_FINISH+=1
nr_active_tests=$((nr_active_tests - 1))
break
fi
2017-05-16 20:16:35 +02:00
# Wake up to increment the counter every second (so that the counter
# appears to users as if it is operating smoothly), but check the
# status of the $MAX_PARALLEL active processes less often, since the
# ps command is expensive.
for ((i=0; i <= $((MAX_PARALLEL/5)); i++)); do
wait_time=$((curr_time - start_time[NEXT_PARALLEL_TEST_TO_FINISH]))
[[ $wait_time -gt $MAX_WAIT_TEST ]] && wait_time=$MAX_WAIT_TEST
if "$INTERACTIVE"; then
echo -en "\r \r" 1>&2
2017-05-19 20:28:18 +02:00
echo -n "Waiting for test #$NEXT_PARALLEL_TEST_TO_FINISH to finish" 1>&2
2017-05-16 20:16:35 +02:00
if [[ $((MAX_WAIT_TEST - wait_time)) -le 60 ]]; then
echo -n " ($((MAX_WAIT_TEST - wait_time)) seconds to timeout)" 1>&2
else
echo -n " ($wait_time seconds)" 1>&2
fi
fi
[[ $wait_time -ge $MAX_WAIT_TEST ]] && break
sleep 1
curr_time=$(date +%s)
done
2017-05-10 18:18:59 +02:00
done
2017-04-05 22:58:57 +02:00
fi
2017-03-31 12:24:25 +02:00
done < "$FNAME"
2017-04-05 22:58:57 +02:00
# Wait for remaining tests to finish
2017-05-10 18:18:59 +02:00
curr_time=$(date +%s)
2017-04-05 22:58:57 +02:00
while [[ $NEXT_PARALLEL_TEST_TO_FINISH -lt $NR_PARALLEL_TESTS ]]; do
2017-05-10 18:18:59 +02:00
if [[ ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} -eq 0 ]] || \
! ps ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >/dev/null; then
2017-05-16 20:16:35 +02:00
"$INTERACTIVE" && echo -en "\r \r" 1>&2
get_next_message_testing_parallel_result "completed"
2017-05-10 18:18:59 +02:00
NEXT_PARALLEL_TEST_TO_FINISH+=1
elif [[ $((curr_time - start_time[NEXT_PARALLEL_TEST_TO_FINISH])) -ge $MAX_WAIT_TEST ]]; then
kill ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} >&2 2>/dev/null
wait ${PARALLEL_TESTING_PID[NEXT_PARALLEL_TEST_TO_FINISH]} 2>/dev/null     # make sure pid terminated, see wait(1p)
2017-05-16 20:16:35 +02:00
"$INTERACTIVE" && echo -en "\r \r" 1>&2
get_next_message_testing_parallel_result "timeout"
2017-05-10 18:18:59 +02:00
NEXT_PARALLEL_TEST_TO_FINISH+=1
else
2017-05-16 20:16:35 +02:00
# Here it is okay to check process status every second, since the
# status of only one process is being checked.
if "$INTERACTIVE"; then
echo -en "\r \r" 1>&2
wait_time=$((curr_time - start_time[NEXT_PARALLEL_TEST_TO_FINISH]))
[[ $wait_time -gt $MAX_WAIT_TEST ]] && wait_time=$MAX_WAIT_TEST
2017-05-19 20:28:18 +02:00
echo -n "Waiting for test #$NEXT_PARALLEL_TEST_TO_FINISH to finish" 1>&2
2017-05-16 20:16:35 +02:00
if [[ $((MAX_WAIT_TEST - wait_time)) -le 60 ]]; then
echo -n " ($((MAX_WAIT_TEST - wait_time)) seconds to timeout)" 1>&2
else
echo -n " ($wait_time seconds)" 1>&2
fi
fi
2017-05-10 18:18:59 +02:00
sleep 1
2017-05-16 20:16:35 +02:00
curr_time=$(date +%s)
2017-05-10 18:18:59 +02:00
fi
2017-04-05 22:58:57 +02:00
done
2016-01-31 21:02:18 +01:00
return $?
2015-09-28 22:54:00 +02:00
}
2015-05-17 22:43:53 +02:00
2016-01-23 19:18:33 +01:00
# This initializes boolean global do_* variables. They keep track of what to do
2015-09-03 12:14:47 +02:00
# -- as the name insinuates
2015-05-17 22:43:53 +02:00
initialize_globals() {
2015-09-17 15:30:15 +02:00
do_allciphers=false
do_vulnerabilities=false
do_beast=false
2017-02-03 22:36:04 +01:00
do_lucky13=false
2015-09-17 15:30:15 +02:00
do_breach=false
do_ccs_injection=false
2017-04-18 23:15:32 +02:00
do_ticketbleed=false
2015-09-17 15:30:15 +02:00
do_cipher_per_proto=false
do_crime=false
do_freak=false
do_logjam=false
2016-03-03 19:50:44 +01:00
do_drown=false
2015-09-17 15:30:15 +02:00
do_header=false
do_heartbleed=false
do_mx_all_ips=false
2015-09-28 22:54:00 +02:00
do_mass_testing=false
2015-11-11 11:56:32 +01:00
do_logging=false
2016-01-23 19:18:33 +01:00
do_json=false
2016-10-28 15:30:07 +02:00
do_pretty_json=false
2016-01-23 19:18:33 +01:00
do_csv=false
2017-02-07 20:25:41 +01:00
do_html=false
2015-09-17 15:30:15 +02:00
do_pfs=false
do_protocols=false
do_rc4=false
2017-08-04 20:48:21 +02:00
do_grease=false
2015-09-17 15:30:15 +02:00
do_renego=false
do_std_cipherlists=false
do_server_defaults=false
do_server_preference=false
do_ssl_poodle=false
2017-02-02 14:42:06 +01:00
do_sweet32=false
2015-09-17 15:30:15 +02:00
do_tls_fallback_scsv=false
2017-04-10 14:45:39 +02:00
do_cipher_match=false
2015-09-17 15:30:15 +02:00
do_tls_sockets=false
2016-01-13 10:21:01 +01:00
do_client_simulation=false
2016-01-31 21:02:18 +01:00
do_display_only=false
2015-05-17 22:43:53 +02:00
}
2015-09-03 12:14:47 +02:00
# Set default scanning options for the boolean global do_* variables.
2015-05-17 22:43:53 +02:00
set_scanning_defaults() {
2015-09-17 15:30:15 +02:00
do_allciphers=true
do_vulnerabilities=true
do_beast=true
2017-02-03 22:36:04 +01:00
do_lucky13=true
2015-09-17 15:30:15 +02:00
do_breach=true
2017-02-02 14:42:06 +01:00
do_heartbleed=true
2015-09-17 15:30:15 +02:00
do_ccs_injection=true
2017-04-18 23:15:32 +02:00
do_ticketbleed=true
2015-09-17 15:30:15 +02:00
do_crime=true
do_freak=true
do_logjam=true
2016-03-03 19:50:44 +01:00
do_drown=true
2017-02-02 14:42:06 +01:00
do_ssl_poodle=true
do_sweet32=true
2015-09-17 15:30:15 +02:00
do_header=true
do_pfs=true
do_rc4=true
2017-02-02 14:42:06 +01:00
do_protocols=true
2015-09-17 15:30:15 +02:00
do_renego=true
do_std_cipherlists=true
do_server_defaults=true
do_server_preference=true
do_tls_fallback_scsv=true
2016-01-13 10:21:01 +01:00
do_client_simulation=true
2017-02-03 22:36:04 +01:00
VULN_COUNT=16
2015-05-17 22:43:53 +02:00
}
query_globals() {
2015-09-17 15:30:15 +02:00
local gbl
local true_nr=0
2017-04-18 23:15:32 +02:00
for gbl in do_allciphers do_vulnerabilities do_beast do_lucky13 do_breach do_ccs_injection do_ticketbleed do_cipher_per_proto do_crime \
2017-08-04 20:48:21 +02:00
do_freak do_logjam do_drown do_header do_heartbleed do_mx_all_ips do_pfs do_protocols do_rc4 do_grease do_renego \
2017-06-20 08:43:35 +02:00
do_std_cipherlists do_server_defaults do_server_preference do_ssl_poodle do_tls_fallback_scsv \
2017-04-10 14:45:39 +02:00
do_sweet32 do_client_simulation do_cipher_match do_tls_sockets do_mass_testing do_display_only; do
2015-09-17 15:30:15 +02:00
[[ "${!gbl}" == "true" ]] && let true_nr++
done
return $true_nr
2015-05-17 22:43:53 +02:00
}
debug_globals() {
2015-09-17 15:30:15 +02:00
local gbl
2017-04-18 23:15:32 +02:00
for gbl in do_allciphers do_vulnerabilities do_beast do_lucky13 do_breach do_ccs_injection do_ticketbleed do_cipher_per_proto do_crime \
2017-08-04 20:48:21 +02:00
do_freak do_logjam do_drown do_header do_heartbleed do_mx_all_ips do_pfs do_protocols do_rc4 do_grease do_renego \
2017-06-20 08:43:35 +02:00
do_std_cipherlists do_server_defaults do_server_preference do_ssl_poodle do_tls_fallback_scsv \
2017-04-10 14:45:39 +02:00
do_sweet32 do_client_simulation do_cipher_match do_tls_sockets do_mass_testing do_display_only; do
2015-09-17 15:30:15 +02:00
printf "%-22s = %s\n" $gbl "${!gbl}"
done
2015-05-17 22:43:53 +02:00
printf "%-22s : %s\n" URI: "$URI"
}
2016-01-31 21:02:18 +01:00
# arg1: either switch+value (=) or switch
# arg2: value (if no = provided)
2015-06-01 12:01:38 +02:00
parse_opt_equal_sign() {
2015-09-17 15:30:15 +02:00
if [[ "$1" == *=* ]]; then
2016-01-31 21:02:18 +01:00
echo ${1#*=}
2015-09-17 15:30:15 +02:00
return 1     # = means we don't need to shift args!
else
2017-03-23 16:36:29 +01:00
echo "$2"
2015-09-17 15:30:15 +02:00
return 0     # we need to shift
fi
2015-06-01 12:01:38 +02:00
}
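As a hedged usage sketch, this is how a hypothetical --foo/--foo=<value> switch would consume its value via parse_opt_equal_sign(); the option name is made up, and the pattern simply mirrors the cases in parse_cmd_line() below:

parse_foo_demo() {
     # hypothetical switch, not a real testssl.sh option
     local foo_value=""
     while [[ $# -gt 0 ]]; do
          case $1 in
               --foo|--foo=*)
                    foo_value="$(parse_opt_equal_sign "$1" "$2")"
                    [[ $? -eq 0 ]] && shift      # value was a separate word, consume it
                    ;;
          esac
          shift
     done
     echo "$foo_value"
}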
2017-04-12 16:00:40 +02:00
# Create the command line string for printing purposes
# See http://stackoverflow.com/questions/10835933/preserve-quotes-in-bash-arguments
create_cmd_line_string() {
local arg
local -a allargs=()
local chars='[ !"#$&()*,;<>?\^`{|}]'
while [[ $# -gt 0 ]]; do
if [[ $1 == *\'* ]]; then
arg=\""$1"\"
elif [[ $1 == *$chars* ]]; then
arg="'$1'"
else
arg="$1"
fi
allargs+=("$arg")     # ${allargs[@]} is to be used only for printing
shift
done
printf '%s\n' "${allargs[*]}"
}
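A hedged illustration of the resulting quoting (the arguments are invented):

#      create_cmd_line_string -t smtp "with space" "don't" example.com
# prints:
#      -t smtp 'with space' "don't" example.com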
2015-06-01 12:01:38 +02:00
2015-06-28 13:52:42 +02:00
parse_cmd_line() {
2017-11-01 09:58:52 +01:00
local outfile_arg=""
local cipher_mapping
local -i retval=0
2017-04-12 16:00:40 +02:00
CMDLINE="$(create_cmd_line_string "${CMDLINE_ARRAY[@]}")"
2017-02-15 19:40:06 +01:00
# Show usage if no options were specified
[[ -z "$1" ]] && help 0
2015-09-17 15:30:15 +02:00
# Set defaults if only a URI was specified, maybe ToDo: use "="-option, then: ${i#*=} i.e. substring removal
[[ "$#" -eq 1 ]] && set_scanning_defaults
while [[ $# -gt 0 ]]; do
case $1 in
2017-04-19 19:46:54 +02:00
--help)
2016-01-23 19:18:33 +01:00
help 0
2015-09-17 15:30:15 +02:00
;;
-b|--banner|-v|--version)
maketempf
2017-10-22 23:41:17 +02:00
get_install_dir
2016-09-21 21:42:45 +02:00
find_openssl_binary
prepare_debug
2015-09-17 15:30:15 +02:00
mybanner
exit 0
;;
--mx)
do_mx_all_ips=true
PORT=25
;;
--mx465)                 # doesn't work with major ISPs
do_mx_all_ips=true
2016-01-23 19:18:33 +01:00
PORT=465
2015-09-17 15:30:15 +02:00
;;
--mx587)                 # doesn't work with major ISPs
do_mx_all_ips=true
2016-01-23 19:18:33 +01:00
PORT=587
2015-09-17 15:30:15 +02:00
;;
--ip|--ip=*)
2017-04-07 09:49:44 +02:00
CMDLINE_IP="$(parse_opt_equal_sign "$1" "$2")"
2015-09-17 15:30:15 +02:00
[[ $? -eq 0 ]] && shift
2017-05-19 20:28:18 +02:00
if [[ $CMDLINE_IP == "proxy" ]]; then
DNS_VIA_PROXY=true
unset CMDLINE_IP
fi
2015-09-17 15:30:15 +02:00
;;
2016-10-28 21:37:10 +02:00
-n|--nodns)
NODNS=true
;;
2016-01-31 21:02:18 +01:00
-V|-V=*|--local|--local=*)     # attention, this could have a value or not!
do_display_only=true
PATTERN2SHOW="$(parse_opt_equal_sign "$1" "$2")"
retval=$?
if [[ "$PATTERN2SHOW" == -* ]]; then
unset PATTERN2SHOW       # we hit the next command ==> not our value
else                     # it was ours, point to next arg
[[ $retval -eq 0 ]] && shift
fi
2015-09-17 15:30:15 +02:00
;;
-x|-x=*|--single[-_]cipher|--single[-_]cipher=*)
2017-04-10 14:45:39 +02:00
do_cipher_match=true
2015-09-17 15:30:15 +02:00
single_cipher=$(parse_opt_equal_sign "$1" "$2")
[[ $? -eq 0 ]] && shift
;;
-t|-t=*|--starttls|--starttls=*)
do_starttls=true
2017-04-07 09:49:44 +02:00
STARTTLS_PROTOCOL="$(parse_opt_equal_sign "$1" "$2")"
2015-09-17 15:30:15 +02:00
[[ $? -eq 0 ]] && shift
case $STARTTLS_PROTOCOL in
2017-06-29 23:57:32 +02:00
ftp|smtp|pop3|imap|xmpp|telnet|ldap|nntp|postgres|mysql) ;;
2017-06-29 23:39:22 +02:00
ftps|smtps|pop3s|imaps|xmpps|telnets|ldaps|nntps) ;;
2017-02-25 16:31:30 +01:00
*) tmln_magenta "\nunrecognized STARTTLS protocol \"$1\", see help" 1>&2
2015-09-17 15:30:15 +02:00
help 1 ;;
esac
;;
--xmpphost|--xmpphost=*)
XMPP_HOST=$(parse_opt_equal_sign "$1" "$2")
[[ $? -eq 0 ]] && shift
;;
-e|--each-cipher)
do_allciphers=true
;;
-E|--cipher-per-proto|--cipher_per_proto)
do_cipher_per_proto=true
;;
-p|--protocols)
do_protocols=true
2015-12-13 01:20:57 +01:00
;;
2017-04-08 09:14:56 +02:00
-s|--std|--standard)
2015-09-17 15:30:15 +02:00
do_std_cipherlists=true
;;
-S|--server[-_]defaults)
do_server_defaults=true
;;
2015-12-29 10:05:20 +01:00
-P|--server[_-]preference|--preference)
2015-09-17 15:30:15 +02:00
do_server_preference=true
;;
2017-04-18 23:15:32 +02:00
-h|--header|--headers)
2015-09-17 15:30:15 +02:00
do_header=true
;;
2016-01-13 10:21:01 +01:00
-c|--client-simulation)
do_client_simulation=true
;;
2015-09-17 15:30:15 +02:00
-U|--vulnerable)
do_vulnerabilities=true
do_heartbleed=true
do_ccs_injection=true
2017-04-18 23:15:32 +02:00
do_ticketbleed=true
2015-09-17 15:30:15 +02:00
do_renego=true
do_crime=true
do_breach=true
do_ssl_poodle=true
do_tls_fallback_scsv=true
2017-02-02 14:42:06 +01:00
do_sweet32=true
2015-09-17 15:30:15 +02:00
do_freak=true
2016-03-03 19:50:44 +01:00
do_drown=true
2015-09-17 15:30:15 +02:00
do_logjam=true
do_beast=true
2017-02-03 22:36:04 +01:00
do_lucky13=true
2015-09-17 15:30:15 +02:00
do_rc4=true
2017-04-19 01:21:13 +02:00
VULN_COUNT=16
2015-09-17 15:30:15 +02:00
;;
2017-04-18 23:15:32 +02:00
-H|--heartbleed)
2015-09-17 15:30:15 +02:00
do_heartbleed=true
2016-01-23 19:18:33 +01:00
let "VULN_COUNT++"
2015-09-17 15:30:15 +02:00
;;
-I|--ccs|--ccs[-_]injection)
do_ccs_injection=true
2016-01-23 19:18:33 +01:00
let "VULN_COUNT++"
2015-09-17 15:30:15 +02:00
;;
2017-04-18 23:15:32 +02:00
-T|--ticketbleed)
do_ticketbleed=true
let "VULN_COUNT++"
;;
2015-09-17 15:30:15 +02:00
-R|--renegotiation)
do_renego=true
let "VULN_COUNT++"
;;
-C|--compression|--crime)
do_crime=true
let "VULN_COUNT++"
;;
2017-04-18 23:15:32 +02:00
-B|--breach)
2015-09-17 15:30:15 +02:00
do_breach=true
let "VULN_COUNT++"
;;
-O|--poodle)
do_ssl_poodle=true
do_tls_fallback_scsv=true
let "VULN_COUNT++"
;;
-Z|--tls[_-]fallback|tls[_-]fallback[_-]scs)
do_tls_fallback_scsv=true
let "VULN_COUNT++"
;;
2017-02-02 14:42:06 +01:00
-W|--sweet32)
do_sweet32=true
let "VULN_COUNT++"
;;
2015-09-17 15:30:15 +02:00
-F|--freak)
do_freak=true
let "VULN_COUNT++"
;;
2016-03-03 19:50:44 +01:00
-D|--drown)
do_drown=true
let "VULN_COUNT++"
;;
2015-09-17 15:30:15 +02:00
-J|--logjam)
do_logjam=true
let "VULN_COUNT++"
;;
-A|--beast)
do_beast=true
let "VULN_COUNT++"
;;
2017-02-03 22:36:04 +01:00
-L|--lucky13)
do_lucky13=true
let "VULN_COUNT++"
;;
2015-09-17 15:30:15 +02:00
-4|--rc4|--appelbaum)
do_rc4=true
let "VULN_COUNT++"
;;
2017-04-08 09:14:56 +02:00
-f|--pfs|--fs|--nsa)
2015-09-17 15:30:15 +02:00
do_pfs=true
;;
2017-08-04 20:48:21 +02:00
-g|--grease)
do_grease=true
;;
2015-09-17 15:30:15 +02:00
--devel)                 ### this development feature will soon disappear
2016-08-28 21:41:30 +02:00
HEX_CIPHER="$TLS12_CIPHER"
2016-03-29 21:56:31 +02:00
# DEBUG=3 ./testssl.sh --devel 03 "cc, 13, c0, 13" google.de --> TLS 1.2, old CHACHA/POLY
# DEBUG=3 ./testssl.sh --devel 03 "cc,a8, cc,a9, cc,aa, cc,ab, cc,ac" blog.cloudflare.com --> new CHACHA/POLY
2015-10-11 23:07:16 +02:00
# DEBUG=3 ./testssl.sh --devel 01 yandex.ru --> TLS 1.0
# DEBUG=3 ./testssl.sh --devel 00 <host which supports SSLv3>
# DEBUG=3 ./testssl.sh --devel 22 <host which still supports SSLv2>
2016-01-23 19:18:33 +01:00
TLS_LOW_BYTE="$2";
2015-09-17 15:30:15 +02:00
if [[ $# -eq 4 ]]; then     # protocol AND ciphers specified
HEX_CIPHER="$3"
shift
fi
shift
do_tls_sockets=true
2016-01-23 19:18:33 +01:00
outln "\nTLS_LOW_BYTE/HEX_CIPHER: ${TLS_LOW_BYTE}/${HEX_CIPHER}"
2015-09-17 15:30:15 +02:00
;;
2016-01-23 19:18:33 +01:00
--wide)
2015-09-17 15:30:15 +02:00
WIDE=true
;;
--assuming[_-]http|--assume[-_]http)
2016-10-11 22:30:30 +02:00
ASSUME_HTTP=true
2015-09-17 15:30:15 +02:00
;;
--sneaky)
SNEAKY=true
;;
-q|--quiet)
QUIET=true
;;
--file|--file=*)
# no shift here as otherwise URI is empty and it bails out
2017-04-07 09:49:44 +02:00
FNAME="$(parse_opt_equal_sign "$1" "$2")"
2015-09-28 22:54:00 +02:00
[[ $? -eq 0 ]] && shift
2015-09-17 15:30:15 +02:00
IKNOW_FNAME=true
2015-09-28 22:54:00 +02:00
WARNINGS=batch           # set this implicitly!
do_mass_testing=true
2015-09-17 15:30:15 +02:00
;;
2017-06-12 18:23:55 +02:00
--mode|--mode=*)
MASS_TESTING_MODE="$(parse_opt_equal_sign "$1" "$2")"
[[ $? -eq 0 ]] && shift
case "$MASS_TESTING_MODE" in
serial|parallel) ;;
*) tmln_magenta "\nmass testing mode can be either \"serial\" or \"parallel\""
help 1
esac
;;
2017-06-12 22:56:36 +02:00
--serial)
MASS_TESTING_MODE=serial
;;
2017-06-12 19:07:58 +02:00
--parallel)
MASS_TESTING_MODE=parallel
;;
2015-09-17 15:30:15 +02:00
--warnings|--warnings=*)
2016-01-23 19:18:33 +01:00
WARNINGS=$(parse_opt_equal_sign "$1" "$2")
2015-09-17 15:30:15 +02:00
[[ $? -eq 0 ]] && shift
case "$WARNINGS" in
2017-06-28 20:28:23 +02:00
batch|off) ;;
*) tmln_magenta "\nwarnings can be either \"batch\" or \"off\""
2015-11-11 11:56:32 +01:00
help 1
2015-09-17 15:30:15 +02:00
esac
;;
--show[-_]each)
2016-03-05 21:07:49 +01:00
SHOW_EACH_C=true
2016-01-23 19:18:33 +01:00
;;
2016-11-15 12:59:07 +01:00
--fast)
FAST=true
;;
2015-11-03 23:29:53 +01:00
--bugs)
BUGS="-bugs"
2016-01-23 19:18:33 +01:00
;;
2015-09-17 15:30:15 +02:00
--debug|--debug=*)
DEBUG=$(parse_opt_equal_sign "$1" "$2")
[[ $? -eq 0 ]] && shift
case $DEBUG in
[0-6]) ;;
2017-02-25 16:31:30 +01:00
*) tmln_magenta_term "\nunrecognized debug value \"$1\", must be between 0..6" 1>&2
2016-01-23 19:18:33 +01:00
help 1
2015-09-17 15:30:15 +02:00
esac
;;
--color|--color=*)
2017-04-07 09:49:44 +02:00
COLOR="$(parse_opt_equal_sign "$1" "$2")"
2015-09-17 15:30:15 +02:00
[[ $? -eq 0 ]] && shift
case $COLOR in
[0-2]) ;;
*) COLOR=2
2017-02-25 16:31:30 +01:00
tmln_magenta "\nunrecognized color: \"$1\", must be between 0..2" 1>&2
2015-11-11 11:56:32 +01:00
help 1
2015-09-17 15:30:15 +02:00
esac
;;
2015-12-06 20:11:33 +01:00
--colorblind)
COLORBLIND=true
;;
2015-11-11 11:56:32 +01:00
--log|--logging)
2016-01-23 19:18:33 +01:00
do_logging=true
2016-01-23 23:33:17 +01:00
;;   # DEFINITION of LOGFILE if no arg specified: automagically in parse_hn_port()
2015-11-11 11:56:32 +01:00
# following does the same but we can specify a log location additionally
2017-10-20 16:32:57 +02:00
--logfile|--logfile=*|-oL|-oL=*)
2017-04-07 09:49:44 +02:00
LOGFILE="$(parse_opt_equal_sign "$1" "$2")"
2015-11-11 11:56:32 +01:00
[[ $? -eq 0 ]] && shift
2016-01-23 19:18:33 +01:00
do_logging=true
;;
--json)
2017-07-10 10:57:48 +02:00
$do_pretty_json && JSONHEADER=false && fatal "flat and pretty JSON output are mutually exclusive" 251
2016-01-23 19:18:33 +01:00
do_json=true
2016-01-23 23:33:17 +01:00
;;   # DEFINITION of JSONFILE if no arg specified: automagically in parse_hn_port()
2016-01-23 19:18:33 +01:00
# following does the same but we can specify a log location additionally
2017-10-20 16:32:57 +02:00
--jsonfile|--jsonfile=*|-oj|-oj=*)
2017-07-10 10:57:48 +02:00
$do_pretty_json && JSONHEADER=false && fatal "flat and pretty JSON output are mutually exclusive" 251
2017-04-07 09:49:44 +02:00
JSONFILE="$(parse_opt_equal_sign "$1" "$2")"
2016-01-23 19:18:33 +01:00
[[ $? -eq 0 ]] && shift
do_json=true
;;
2016-10-28 15:30:07 +02:00
--json-pretty)
2017-07-10 10:57:48 +02:00
$do_json && JSONHEADER=false && fatal "flat and pretty JSON output are mutually exclusive" 251
2016-10-28 15:30:07 +02:00
do_pretty_json=true
;;
2017-10-20 16:32:57 +02:00
--jsonfile-pretty|--jsonfile-pretty=*|-oJ|-oJ=*)
2017-07-10 10:57:48 +02:00
$do_json && JSONHEADER=false && fatal "flat and pretty JSON output are mutually exclusive" 251
2017-04-07 09:49:44 +02:00
JSONFILE="$(parse_opt_equal_sign "$1" "$2")"
2016-10-28 15:30:07 +02:00
[[ $? -eq 0 ]] && shift
do_pretty_json=true
;;
--severity|--severity=*)
set_severity_level "$(parse_opt_equal_sign "$1" "$2")"
[[ $? -eq 0 ]] && shift
;;
2016-11-17 23:27:27 +01:00
--hints)
GIVE_HINTS=true
;;
2016-01-23 19:18:33 +01:00
--csv)
do_csv=true
2016-01-23 23:33:17 +01:00
;;   # DEFINITION of CSVFILE if no arg specified: automagically in parse_hn_port()
2016-01-23 19:18:33 +01:00
# following does the same but we can specify a log location additionally
2017-10-20 16:32:57 +02:00
--csvfile|--csvfile=*|-oC|-oC=*)
2017-04-07 09:49:44 +02:00
CSVFILE="$(parse_opt_equal_sign "$1" "$2")"
2016-01-23 19:18:33 +01:00
[[ $? -eq 0 ]] && shift
do_csv=true
;;
2017-02-14 19:19:12 +01:00
--html)
do_html=true
;;   # DEFINITION of HTMLFILE if no arg specified: automagically in parse_hn_port()
# following does the same but we can specify a file location additionally
2017-10-20 16:32:57 +02:00
--htmlfile|--htmlfile=*|-oH|-oH=*)
2017-04-07 09:49:44 +02:00
HTMLFILE="$(parse_opt_equal_sign "$1" "$2")"
2017-02-07 20:25:41 +01:00
[[ $? -eq 0 ]] && shift
do_html=true
Handle --file option
Introduced a "trick" so that if the `--file` option is used, `html_header()` is called only once, before anything is printed, and `html_footer()` is called only once, after all printing is complete. With this, `html_header()` now deletes the output file if it already exists.
Also introduced `html_reserved()`, which is called for all text sent to `out_html()`. `html_reserved()` converts the HTML-reserved characters (", ', &, <, >) to their corresponding entity references (&quot;, &apos;, &amp;, &lt;, &gt;).
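The kind of substitution described can be sketched as follows (a minimal sketch only, not the actual html_reserved() implementation; the ampersand must be escaped first so the other replacements are not escaped a second time):

html_reserved_sketch() {
     # escape '&' first, then the remaining reserved characters
     sed -e 's/&/\&amp;/g' \
         -e 's/</\&lt;/g' -e 's/>/\&gt;/g' \
         -e "s/'/\&apos;/g" -e 's/"/\&quot;/g' <<< "$1"
}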
2017-02-09 23:03:21 +01:00
;;
2017-11-01 09:58:52 +01:00
--outfile|--outfile=*|-oa|-oa=*)
outfile_arg="$(parse_opt_equal_sign "$1" "$2")"
[[ $? -eq 0 ]] && shift
if [[ "$outfile_arg" != "auto" ]]; then
HTMLFILE="$outfile_arg.html"
CSVFILE="$outfile_arg.csv"
JSONFILE="$outfile_arg.json"
LOGFILE="$outfile_arg.log"
fi
2017-10-20 16:32:57 +02:00
do_html=true
do_json=true
do_csv=true
do_logging=true
;;
2017-11-01 09:58:52 +01:00
--outFile|--outFile=*|-oA|-oA=*)
outfile_arg="$(parse_opt_equal_sign "$1" "$2")"
[[ $? -eq 0 ]] && shift
if [[ "$outfile_arg" != "auto" ]]; then
HTMLFILE="$outfile_arg.html"
CSVFILE="$outfile_arg.csv"
JSONFILE="$outfile_arg.json"
LOGFILE="$outfile_arg.log"
fi
2017-10-20 16:32:57 +02:00
do_html=true
do_pretty_json=true
do_csv=true
do_logging=true
;;
2016-06-13 15:35:56 +02:00
--append)
APPEND=true
;;
2015-09-17 15:30:15 +02:00
--openssl|--openssl=*)
2017-04-07 09:49:44 +02:00
OPENSSL="$(parse_opt_equal_sign "$1" "$2")"
2015-09-17 15:30:15 +02:00
[[ $? -eq 0 ]] && shift
;;
2016-07-12 15:59:24 +02:00
--openssl-timeout|--openssl-timeout=*)
2017-04-07 09:49:44 +02:00
OPENSSL_TIMEOUT="$(parse_opt_equal_sign "$1" "$2")"
2016-07-12 15:59:24 +02:00
[[ $? -eq 0 ]] && shift
;;
2016-01-16 20:51:03 +01:00
--mapping|--mapping=*)
2017-04-07 09:49:44 +02:00
cipher_mapping="$(parse_opt_equal_sign "$1" "$2")"
2016-01-16 20:51:03 +01:00
[[ $? -eq 0 ]] && shift
case "$cipher_mapping" in
2017-02-27 16:34:04 +01:00
no-openssl) DISPLAY_CIPHERNAMES="rfc-only" ;;
no-rfc) DISPLAY_CIPHERNAMES="openssl-only" ;;
openssl) DISPLAY_CIPHERNAMES="openssl" ;;
rfc) DISPLAY_CIPHERNAMES="rfc" ;;
*) tmln_warning "\nmapping can only be \"no-openssl\", \"no-rfc\", \"openssl\" or \"rfc\""
2016-01-16 20:51:03 +01:00
help 1 ;;
esac
;;
2015-09-17 15:30:15 +02:00
--proxy|--proxy=*)
2017-04-07 09:49:44 +02:00
PROXY="$(parse_opt_equal_sign "$1" "$2")"
2015-09-17 15:30:15 +02:00
[[ $? -eq 0 ]] && shift
;;
2015-10-05 09:56:21 +02:00
-6)                      # doesn't work automagically. My versions have -DOPENSSL_USE_IPV6, CentOS/RHEL/FC do not
HAS_IPv6=true
;;
2017-06-01 18:08:13 +02:00
--has[-_]dhbits|--has[_-]dh[-_]bits)
# Should work automagically. Helper switch for CentOS, RHEL and FC with the openssl server temp key backport (version 1.0.1), see #190
2015-09-22 20:09:26 +02:00
HAS_DH_BITS=true
;;
2015-09-17 15:30:15 +02:00
--ssl_native|--ssl-native)
SSL_NATIVE=true
;;
(--) shift
2016-01-23 19:18:33 +01:00
break
2015-09-17 15:30:15 +02:00
;;
2017-02-25 16:31:30 +01:00
(-*) tmln_warning "0: unrecognized option \"$1\"" 1>&2;
2016-01-23 19:18:33 +01:00
help 1
2015-09-17 15:30:15 +02:00
;;
2016-01-23 19:18:33 +01:00
(*) break
2015-09-17 15:30:15 +02:00
;;
esac
shift
done
2017-02-15 19:40:06 +01:00
# Show usage if no further options were specified
2016-01-31 21:02:18 +01:00
if [[ -z "$1" ]] && [[ -z "$FNAME" ]] && ! $do_display_only; then
2017-04-07 10:26:41 +02:00
fatal "URI missing" "1"
2015-09-28 22:54:00 +02:00
else
2015-09-17 15:30:15 +02:00
# left off here is the URI
2015-09-28 22:54:00 +02:00
URI="$1"
2015-12-08 13:31:52 +01:00
# parameter after URI supplied:
2017-04-07 10:26:41 +02:00
[[ -n "$2" ]] && fatal "URI comes last" "1"
2015-09-28 22:54:00 +02:00
fi
2017-06-01 18:08:13 +02:00
[[ $CMDLINE_IP == "one" ]] && "$NODNS" && fatal "\"--ip=one\" and \"--nodns\" don't work together"
2015-09-17 15:30:15 +02:00
2015-11-11 11:56:32 +01:00
[[ "$DEBUG" -ge 5 ]] && debug_globals
2015-09-17 15:30:15 +02:00
# if we have no "do_*" set here --> query_globals: we do a standard run -- otherwise just the one specified
query_globals && set_scanning_defaults
2015-05-17 22:43:53 +02:00
}
2015-09-26 22:44:33 +02:00
# connect call from openssl needs ipv6 in square brackets
nodeip_to_proper_ip6() {
2015-10-05 09:56:21 +02:00
local len_nodeip=0
if is_ipv6addr $NODEIP; then
2016-06-02 09:59:52 +02:00
${UNBRACKTD_IPV6} || NODEIP="[$NODEIP]"
2015-10-05 09:56:21 +02:00
len_nodeip=${#NODEIP}
2017-02-10 20:47:49 +01:00
CORRECT_SPACES="$(printf -- ' %.s' $(eval "echo {1..$((len_nodeip - 17))}"))"
2015-10-05 09:56:21 +02:00
# IPv6 addresses are longer; this variable takes care that "further IP" and "Service" are properly aligned
fi
2015-09-26 22:44:33 +02:00
}
2015-10-11 23:07:16 +02:00
reset_hostdepended_vars() {
TLS_EXTENSIONS=""
PROTOS_OFFERED=""
OPTIMAL_PROTO=""
2016-06-09 11:04:40 +02:00
SERVER_SIZE_LIMIT_BUG=false
2015-10-11 23:07:16 +02:00
}
2017-03-21 12:44:03 +01:00
# rough estimate; in the future we may want to make use of nanoseconds (%N)
# note this is for performance debugging purposes (MEASURE_TIME=yes), so eye candy is not important
2017-03-20 22:53:18 +01:00
time_right_align() {
2017-03-21 12:44:03 +01:00
local new_delta
"$MEASURE_TIME" || return
new_delta=$(( $(date +%s) - LAST_TIME ))
printf "%${COLUMNS}s" "$new_delta"
2017-04-12 21:00:08 +02:00
[[ -e "$MEASURE_TIME_FILE" ]] && echo "$1 : $new_delta" >> "$MEASURE_TIME_FILE"
LAST_TIME=$(( new_delta + LAST_TIME ))
2017-03-20 22:53:18 +01:00
}
2016-01-23 19:18:33 +01:00
2015-05-17 22:43:53 +02:00
lets_roll() {
2015-09-17 15:30:15 +02:00
local ret
2016-10-28 15:30:07 +02:00
local section_number=1
2015-09-17 15:30:15 +02:00
2017-03-22 16:02:48 +01:00
if [[ "$1" == init ]]; then
# called once upfront to be able to measure preparation time before everything starts
START_TIME=$(date +%s)
LAST_TIME=$START_TIME
2017-04-12 21:00:08 +02:00
[[ -n "$MEASURE_TIME_FILE" ]] && >"$MEASURE_TIME_FILE"
2017-03-22 16:02:48 +01:00
return 0
fi
time_right_align initialized
2017-03-20 22:53:18 +01:00
2016-07-04 23:05:12 +02:00
[[ -z "$NODEIP" ]] && fatal "$NODE doesn't resolve to an IP address" 2
2015-09-26 22:44:33 +02:00
nodeip_to_proper_ip6
2015-10-11 23:07:16 +02:00
reset_hostdepended_vars
2015-09-17 15:30:15 +02:00
determine_rdns
2016-10-28 15:30:07 +02:00
2017-01-28 07:17:58 +01:00
((SERVER_COUNTER++))
2015-09-17 15:30:15 +02:00
determine_service "$1"        # any starttls service goes here
2016-09-28 23:15:37 +02:00
$do_tls_sockets && [[ $TLS_LOW_BYTE -eq 22 ]] && { sslv2_sockets "" "true"; echo "$?"; exit 0; }
Add option to retrieve entire server response
In some cases the server's response to a ClientHello spans more than one packet. If the goal is just to determine whether the connection was successful and to extract a few pieces of information from the ServerHello message, then this is unlikely to be a problem. However, if there is a desire to extract the server's certificate chain (Certificate message) or to determine the type and size of the server's ephemeral public key (ServerKeyExchange message), then the entire response needs to be obtained, even if it spans multiple packets.
This PR adds a new function, `check_tls_serverhellodone()`, that checks whether the entire response has been received (e.g., whether the ServerHelloDone message has arrived). If the check indicates that the response is incomplete, `tls_sockets()` requests more data from the server until the response is complete or until the server provides no further data.
The PR only changes the behavior of `tls_sockets()` if the caller indicates that it wants to extract the ephemeral key or that it wants the entire response to be parsed. Otherwise, only the first packet returned by the server is sent to `parse_tls_serverhello()`. [The value of `$process_full` is not used at the moment, but will be in a subsequent PR that modifies `parse_tls_serverhello()`.]
This PR also changes `tls_sockets()` to send a close_notify to the server if the connection was successfully established.
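As a toy illustration of the completeness check described above (not the actual check_tls_serverhellodone()): the sketch below scans a lowercase hex dump of the received handshake messages and reports whether a ServerHelloDone (handshake type 0x0e) is present; if it is not, the caller would keep reading from the socket.

contains_serverhellodone_sketch() {
     # $1: lowercase hex string of concatenated handshake messages (illustrative only)
     local hex="$1" msg_type
     local -i offset=0 msg_len
     while [[ $(( ${#hex} - offset )) -ge 8 ]]; do           # 4 header bytes = 8 hex chars
          msg_type="${hex:offset:2}"
          msg_len=$(( 16#${hex:offset+2:6} ))                # 3-byte length field, in bytes
          [[ "$msg_type" == 0e ]] && return 0                # ServerHelloDone found
          offset+=$(( 8 + 2*msg_len ))                       # skip header + body
     done
     return 1                                                # incomplete: keep reading
}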
2016-10-25 17:04:23 +02:00
$do_tls_sockets && [[ $TLS_LOW_BYTE -ne 22 ]] && { tls_sockets "$TLS_LOW_BYTE" "$HEX_CIPHER" "all"; echo "$?"; exit 0; }
2017-08-28 21:11:47 +02:00
$do_cipher_match && { fileout_section_header $section_number false; run_cipher_match ${single_cipher}; }
((section_number++))
2015-09-17 15:30:15 +02:00
# all top level functions now following have the prefix "run_"
2017-01-28 07:17:58 +01:00
fileout_section_header $section_number false && ((section_number++))
2017-06-20 08:43:35 +02:00
$do_protocols && {
run_protocols; ret=$(($? + ret)); time_right_align run_protocols;
run_spdy; ret=$(($? + ret)); time_right_align run_spdy;
run_http2; ret=$(($? + ret)); time_right_align run_http2;
}
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_std_cipherlists && { run_std_cipherlists; ret=$(($? + ret)); time_right_align run_std_cipherlists; }
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_pfs && { run_pfs; ret=$(($? + ret)); time_right_align run_pfs; }
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_server_preference && { run_server_preference; ret=$(($? + ret)); time_right_align run_server_preference; }
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_server_defaults && { run_server_defaults; ret=$(($? + ret)); time_right_align run_server_defaults; }
2015-09-17 15:30:15 +02:00
if $do_header; then
#TODO: refactor this into functions
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2015-09-17 15:30:15 +02:00
if [[ $SERVICE == "HTTP" ]]; then
run_http_header "$URL_PATH"
run_http_date "$URL_PATH"
run_hsts "$URL_PATH"
run_hpkp "$URL_PATH"
run_server_banner "$URL_PATH"
run_application_banner "$URL_PATH"
run_cookie_flags "$URL_PATH"
run_more_flags "$URL_PATH"
run_rp_banner "$URL_PATH"
2017-03-22 16:02:48 +01:00
time_right_align do_header
2015-09-17 15:30:15 +02:00
fi
2016-10-28 15:30:07 +02:00
else
((section_number++))
2015-09-17 15:30:15 +02:00
fi
# vulnerabilities
if [[ $VULN_COUNT -gt $VULN_THRESHLD ]] || $do_vulnerabilities; then
2015-10-15 14:15:07 +02:00
outln; pr_headlineln " Testing vulnerabilities "
outln
2015-09-17 15:30:15 +02:00
fi
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_heartbleed && { run_heartbleed; ret=$(($? + ret)); time_right_align run_heartbleed; }
$do_ccs_injection && { run_ccs_injection; ret=$(($? + ret)); time_right_align run_ccs_injection; }
2017-04-18 23:15:32 +02:00
$do_ticketbleed && { run_ticketbleed; ret=$(($? + ret)); time_right_align run_ticketbleed; }
2017-03-22 16:02:48 +01:00
$do_renego && { run_renego; ret=$(($? + ret)); time_right_align run_renego; }
$do_crime && { run_crime; ret=$(($? + ret)); time_right_align run_crime; }
$do_breach && { run_breach "$URL_PATH"; ret=$(($? + ret)); time_right_align run_breach; }
$do_ssl_poodle && { run_ssl_poodle; ret=$(($? + ret)); time_right_align run_ssl_poodle; }
$do_tls_fallback_scsv && { run_tls_fallback_scsv; ret=$(($? + ret)); time_right_align run_tls_fallback_scsv; }
$do_sweet32 && { run_sweet32; ret=$(($? + ret)); time_right_align run_sweet32; }
$do_freak && { run_freak; ret=$(($? + ret)); time_right_align run_freak; }
$do_drown && { run_drown; ret=$(($? + ret)); time_right_align run_drown; }
$do_logjam && { run_logjam; ret=$(($? + ret)); time_right_align run_logjam; }
$do_beast && { run_beast; ret=$(($? + ret)); time_right_align run_beast; }
$do_lucky13 && { run_lucky13; ret=$(($? + ret)); time_right_align run_lucky13; }
$do_rc4 && { run_rc4; ret=$(($? + ret)); time_right_align run_rc4; }
2015-09-17 15:30:15 +02:00
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_allciphers && { run_allciphers; ret=$(($? + ret)); time_right_align run_allciphers; }
$do_cipher_per_proto && { run_cipher_per_proto; ret=$(($? + ret)); time_right_align run_cipher_per_proto; }
2016-10-28 15:30:07 +02:00
fileout_section_header $section_number true && ((section_number++))
2017-03-22 16:02:48 +01:00
$do_client_simulation && { run_client_simulation; ret=$(($? + ret)); time_right_align run_client_simulation; }
2015-09-17 15:30:15 +02:00
2017-08-04 20:48:21 +02:00
fileout_section_header $section_number true && ((section_number++))
"$do_grease" && { run_grease; ret=$(($? + ret)); time_right_align run_grease; }
2017-01-28 07:17:58 +01:00
fileout_section_footer true
2016-10-28 15:30:07 +02:00
2015-09-17 15:30:15 +02:00
outln
2016-10-28 15:30:07 +02:00
END_TIME=$(date +%s)
2017-03-22 16:02:48 +01:00
SCAN_TIME=$(( END_TIME - START_TIME ))
2016-01-23 19:18:33 +01:00
datebanner " Done"
2015-09-17 15:30:15 +02:00
2017-03-21 12:44:03 +01:00
"$MEASURE_TIME" && printf "%${COLUMNS}s\n" "$SCAN_TIME"
2017-04-12 21:00:08 +02:00
[[ -e "$MEASURE_TIME_FILE" ]] && echo "Total : $SCAN_TIME" >> "$MEASURE_TIME_FILE"
2017-03-21 12:44:03 +01:00
2015-09-17 15:30:15 +02:00
return $ret
2015-05-17 22:43:53 +02:00
}
################# main #################
2016-01-31 21:02:18 +01:00
2017-04-06 09:47:09 +02:00
#main() {
# local ret=0
# local ip=""
ret=0
ip=""
lets_roll init
initialize_globals
parse_cmd_line " $@ "
# html_header() needs to be called early! Otherwise if html_out() is called before html_header() and the
# command line contains --htmlfile <htmlfile> or --html, it'll make problems with html output, see #692.
# json_header and csv_header can be called later but for context reasons we'll leave it here
html_header
json_header
csv_header
get_install_dir
2017-04-11 18:48:23 +02:00
# see #705: we need to source TLS_DATA_FILE here instead of in get_install_dir()
2017-04-12 21:00:08 +02:00
[[ -r "$TLS_DATA_FILE" ]] && . "$TLS_DATA_FILE"
2017-04-06 09:47:09 +02:00
set_color_functions
maketempf
find_openssl_binary
prepare_debug
prepare_arrays
mybanner
check_proxy
check4openssl_oldfarts
check_bsd_mount
Use CHILD_MASS_TESTING environment variable
This PR introduces the environment variable `CHILD_MASS_TESTING` and uses it, rather than the `$APPEND` flag, as the indicator that testssl.sh is running as a child within mass testing. It also makes a number of other changes to make the handling of HTML, CSV, JSON, and log files consistent, and it fixes a number of bugs related to the generation of these files when mass testing is being performed.
Please let me know if you disagree with any of the changes in this PR, or if you would prefer that it be broken up into multiple smaller PRs.
Some of the changes are as follows:
- When the `$APPEND` flag is true, all of these files are appended to and headers and footers are omitted. (Perhaps this should be changed. Appending to a log file isn't an issue, but appending to a JSON or HTML file without including headers or footers seems to just create an improperly formatted file).
- Following the code in `prepare_logging()`, an error is printed and the program stops if the `$APPEND` flag is false and one of the files to be written to already exists.
Some of the bugs fixed:
Creating log files did not work with mass testing:
- If `--logfile <logfile>` is used, then the parent and each child try to write to "logfile".
- If `--logging` is used, then a log file is created for each child, but an oddly-named log file is also created for the parent. The one created by the parent contains the entire output.
Plain JSON files:
- When `--jsonfile <jsonfile>` is run, there is no comma separating the final finding for one child and the first finding for the next child.
Pretty JSON files:
- When `--jsonfile-pretty <jsonfile>` is called without mass testing, the "target host" line is empty, since `$NODE` has not yet been set.
- When `--jsonfile-pretty <jsonfile>` is run with mass testing, there is no comma separating the final finding for one child and the first finding for the next child. In addition, `fileout_pretty_json_banner()` is never called, and the entries for individual tests have insufficient information to determine what is being tested (they list "service" and "ip", but not the port number).
For the final issue, when mass testing is being performed and all output is placed in a single file, the parent calls `fileout_pretty_json_banner()` but tells it not to include a "target host" or "port"; each child then includes a "target host" and "port" when its "service" and "ip" are printed.
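A minimal sketch of the pattern described above (not the actual run_mass_testing() code; the option and file names are illustrative):

sketch_spawn_child() {
     # parent: mark the child via the environment rather than via an --append flag
     CHILD_MASS_TESTING=true "$0" --jsonfile "$1.json" "$1" &
}
sketch_running_as_child() {
     # child: e.g. skip JSON/CSV/HTML headers and footers, the parent owns them
     [[ "${CHILD_MASS_TESTING:-false}" == true ]]
}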
2017-03-29 17:16:09 +02:00
2017-04-06 09:47:09 +02:00
if "$do_display_only"; then
prettyprint_local "$PATTERN2SHOW"
exit $?
fi
fileout_banner
2015-08-01 23:11:27 +02:00
2017-04-06 09:47:09 +02:00
if "$do_mass_testing"; then
prepare_logging
2017-06-12 18:23:55 +02:00
if [[ "$MASS_TESTING_MODE" == "parallel" ]]; then
2017-05-16 20:16:35 +02:00
run_mass_testing_parallel
else
2017-05-19 17:00:30 +02:00
run_mass_testing
2017-05-16 20:16:35 +02:00
fi
2017-04-06 09:47:09 +02:00
exit $?
fi
html_banner
2017-03-27 00:30:42 +02:00
2017-04-06 09:47:09 +02:00
#TODO: there shouldn't be the need for a special case for --mx, only the IP addresses we would need upfront and the do-parser
if "$do_mx_all_ips"; then
2017-06-01 18:08:13 +02:00
query_globals # if we have just 1x "do_*" --> we do a standard run -- otherwise just the one specified
2017-04-06 09:47:09 +02:00
[[ $? -eq 1 ]] && set_scanning_defaults
2017-06-01 18:08:13 +02:00
run_mx_all_ips "${URI}" $PORT          # we should reduce run_mx_all_ips to the stuff necessary as ~15 lines later we have similar code
2017-04-06 09:47:09 +02:00
exit $?
fi
2017-03-27 00:30:42 +02:00
2017-06-01 18:08:13 +02:00
[[ -z "$NODE" ]] && parse_hn_port "${URI}"        # NODE, URL_PATH, PORT, IPADDR and IP46ADDR is set now
2017-04-06 09:47:09 +02:00
prepare_logging
2017-04-22 15:39:18 +02:00
2017-04-06 09:47:09 +02:00
if ! determine_ip_addresses; then
fatal "No IP address could be determined" 2
fi
2017-06-01 18:08:13 +02:00
if [[ $(count_words "$IPADDRs") -gt 1 ]]; then          # we have more than one ipv4 address to check
pr_bold " Testing all IPv4 addresses (port $PORT): "; outln "$IPADDRs"
for ip in $IPADDRs; do
2016-06-23 19:42:26 +02:00
draw_line "-" $((TERM_WIDTH * 2 / 3))
2015-09-17 15:30:15 +02:00
outln
2017-06-01 18:08:13 +02:00
NODEIP="$ip"
2015-09-17 15:30:15 +02:00
lets_roll "${STARTTLS_PROTOCOL}"
2017-06-01 18:08:13 +02:00
ret=$(($? + ret))
done
draw_line "-" $((TERM_WIDTH * 2 / 3))
outln
pr_bold " Done testing now all IP addresses (on port $PORT): "; outln "$IPADDRs"
else                                                    # Just one IPv4 address to check, applies also if CMDLINE_IP was supplied
NODEIP="$IPADDRs"
lets_roll "${STARTTLS_PROTOCOL}"
ret=$?
2015-09-17 15:30:15 +02:00
fi
2017-04-06 09:47:09 +02:00
#}
2015-05-17 22:43:53 +02:00
2017-04-06 09:47:09 +02:00
#main
2017-04-25 15:12:01 +02:00
exit $ret
2017-03-27 00:30:42 +02:00