first
This commit is contained in:
159
analyzer/tcp/fet.go
Normal file
159
analyzer/tcp/fet.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package tcp
|
||||
|
||||
import (
	"math/bits"

	"github.com/apernet/OpenGFW/analyzer"
)
|
||||
|
||||
var _ analyzer.TCPAnalyzer = (*FETAnalyzer)(nil)
|
||||
|
||||
// FETAnalyzer stands for "Fully Encrypted Traffic" analyzer.
|
||||
// It implements an algorithm to detect fully encrypted proxy protocols
|
||||
// such as Shadowsocks, mentioned in the following paper:
|
||||
// https://gfw.report/publications/usenixsecurity23/data/paper/paper.pdf
|
||||
type FETAnalyzer struct{}
|
||||
|
||||
func (a *FETAnalyzer) Name() string {
|
||||
return "fet"
|
||||
}
|
||||
|
||||
func (a *FETAnalyzer) Limit() int {
|
||||
// We only really look at the first packet
|
||||
return 8192
|
||||
}
|
||||
|
||||
func (a *FETAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) analyzer.TCPStream {
|
||||
return newFETStream(logger)
|
||||
}
|
||||
|
||||
type fetStream struct {
|
||||
logger analyzer.Logger
|
||||
}
|
||||
|
||||
func newFETStream(logger analyzer.Logger) *fetStream {
|
||||
return &fetStream{logger: logger}
|
||||
}
|
||||
|
||||
func (s *fetStream) Feed(rev, start, end bool, skip int, data []byte) (u *analyzer.PropUpdate, done bool) {
|
||||
if skip != 0 {
|
||||
return nil, true
|
||||
}
|
||||
if len(data) == 0 {
|
||||
return nil, false
|
||||
}
|
||||
ex1 := averagePopCount(data)
|
||||
ex2 := isFirstSixPrintable(data)
|
||||
ex3 := printablePercentage(data)
|
||||
ex4 := contiguousPrintable(data)
|
||||
ex5 := isTLSorHTTP(data)
|
||||
exempt := (ex1 <= 3.4 || ex1 >= 4.6) || ex2 || ex3 > 0.5 || ex4 > 20 || ex5
|
||||
return &analyzer.PropUpdate{
|
||||
Type: analyzer.PropUpdateReplace,
|
||||
M: analyzer.PropMap{
|
||||
"ex1": ex1,
|
||||
"ex2": ex2,
|
||||
"ex3": ex3,
|
||||
"ex4": ex4,
|
||||
"ex5": ex5,
|
||||
"yes": !exempt,
|
||||
},
|
||||
}, true
|
||||
}
|
||||
|
||||
func (s *fetStream) Close(limited bool) *analyzer.PropUpdate {
|
||||
return nil
|
||||
}
|
||||
|
||||
// popCount returns the number of bits set in b.
func popCount(b byte) int {
	// Use the stdlib intrinsic instead of a hand-rolled shift loop.
	return bits.OnesCount8(b)
}

// averagePopCount returns the average popcount of the given bytes,
// or 0 for empty input.
// This is the "Ex1" metric in the paper.
func averagePopCount(bytes []byte) float32 {
	if len(bytes) == 0 {
		return 0
	}
	total := 0
	for _, b := range bytes {
		total += popCount(b)
	}
	return float32(total) / float32(len(bytes))
}
|
||||
|
||||
// isFirstSixPrintable returns true if the first six bytes are printable ASCII.
|
||||
// This is the "Ex2" metric in the paper.
|
||||
func isFirstSixPrintable(bytes []byte) bool {
|
||||
if len(bytes) < 6 {
|
||||
return false
|
||||
}
|
||||
for i := range bytes[:6] {
|
||||
if !isPrintable(bytes[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// printablePercentage returns the percentage of printable ASCII bytes.
|
||||
// This is the "Ex3" metric in the paper.
|
||||
func printablePercentage(bytes []byte) float32 {
|
||||
if len(bytes) == 0 {
|
||||
return 0
|
||||
}
|
||||
count := 0
|
||||
for i := range bytes {
|
||||
if isPrintable(bytes[i]) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return float32(count) / float32(len(bytes))
|
||||
}
|
||||
|
||||
// contiguousPrintable returns the length of the longest contiguous sequence of
|
||||
// printable ASCII bytes.
|
||||
// This is the "Ex4" metric in the paper.
|
||||
func contiguousPrintable(bytes []byte) int {
|
||||
if len(bytes) == 0 {
|
||||
return 0
|
||||
}
|
||||
maxCount := 0
|
||||
current := 0
|
||||
for i := range bytes {
|
||||
if isPrintable(bytes[i]) {
|
||||
current++
|
||||
} else {
|
||||
if current > maxCount {
|
||||
maxCount = current
|
||||
}
|
||||
current = 0
|
||||
}
|
||||
}
|
||||
if current > maxCount {
|
||||
maxCount = current
|
||||
}
|
||||
return maxCount
|
||||
}
|
||||
|
||||
// isTLSorHTTP returns true if the given bytes look like the start of a
// TLS handshake or an HTTP request.
// This is the "Ex5" metric in the paper.
func isTLSorHTTP(data []byte) bool {
	if len(data) < 3 {
		return false
	}
	// TLS handshake record for TLS 1.0-1.3: content type 0x16,
	// version 0x03 0x00..0x03.
	if data[0] == 0x16 && data[1] == 0x03 && data[2] <= 0x03 {
		return true
	}
	// First three bytes of a standard HTTP method name.
	switch string(data[:3]) {
	case "GET", "HEA", "POS", "PUT", "DEL", "CON", "OPT", "TRA", "PAT":
		return true
	}
	return false
}
|
||||
|
||||
// isPrintable reports whether b is a printable ASCII character
// (space through tilde, inclusive).
func isPrintable(b byte) bool {
	return b >= ' ' && b <= '~'
}
|
||||
193
analyzer/tcp/http.go
Normal file
193
analyzer/tcp/http.go
Normal file
@@ -0,0 +1,193 @@
|
||||
package tcp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/apernet/OpenGFW/analyzer"
|
||||
"github.com/apernet/OpenGFW/analyzer/utils"
|
||||
)
|
||||
|
||||
var _ analyzer.TCPAnalyzer = (*HTTPAnalyzer)(nil)
|
||||
|
||||
type HTTPAnalyzer struct{}
|
||||
|
||||
func (a *HTTPAnalyzer) Name() string {
|
||||
return "http"
|
||||
}
|
||||
|
||||
func (a *HTTPAnalyzer) Limit() int {
|
||||
return 8192
|
||||
}
|
||||
|
||||
func (a *HTTPAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) analyzer.TCPStream {
|
||||
return newHTTPStream(logger)
|
||||
}
|
||||
|
||||
type httpStream struct {
|
||||
logger analyzer.Logger
|
||||
|
||||
reqBuf *utils.ByteBuffer
|
||||
reqMap analyzer.PropMap
|
||||
reqUpdated bool
|
||||
reqLSM *utils.LinearStateMachine
|
||||
reqDone bool
|
||||
|
||||
respBuf *utils.ByteBuffer
|
||||
respMap analyzer.PropMap
|
||||
respUpdated bool
|
||||
respLSM *utils.LinearStateMachine
|
||||
respDone bool
|
||||
}
|
||||
|
||||
func newHTTPStream(logger analyzer.Logger) *httpStream {
|
||||
s := &httpStream{logger: logger, reqBuf: &utils.ByteBuffer{}, respBuf: &utils.ByteBuffer{}}
|
||||
s.reqLSM = utils.NewLinearStateMachine(
|
||||
s.parseRequestLine,
|
||||
s.parseRequestHeaders,
|
||||
)
|
||||
s.respLSM = utils.NewLinearStateMachine(
|
||||
s.parseResponseLine,
|
||||
s.parseResponseHeaders,
|
||||
)
|
||||
return s
|
||||
}
|
||||
|
||||
// Feed appends newly arrived bytes to the buffer of the matching direction
// (rev selects the response side), advances that direction's state machine,
// and emits a merge update when new properties were parsed. The stream is
// done when a parser cancels (not valid HTTP) or both directions finished.
func (s *httpStream) Feed(rev, start, end bool, skip int, data []byte) (u *analyzer.PropUpdate, d bool) {
	if skip != 0 {
		// A gap in the stream makes further parsing impossible.
		return nil, true
	}
	if len(data) == 0 {
		return nil, false
	}
	var update *analyzer.PropUpdate
	var cancelled bool
	if rev {
		s.respBuf.Append(data)
		// Clear the flag so we only report changes made by this Run.
		s.respUpdated = false
		cancelled, s.respDone = s.respLSM.Run()
		if s.respUpdated {
			update = &analyzer.PropUpdate{
				Type: analyzer.PropUpdateMerge,
				M:    analyzer.PropMap{"resp": s.respMap},
			}
			s.respUpdated = false
		}
	} else {
		s.reqBuf.Append(data)
		// Clear the flag so we only report changes made by this Run.
		s.reqUpdated = false
		cancelled, s.reqDone = s.reqLSM.Run()
		if s.reqUpdated {
			update = &analyzer.PropUpdate{
				Type: analyzer.PropUpdateMerge,
				M:    analyzer.PropMap{"req": s.reqMap},
			}
			s.reqUpdated = false
		}
	}
	return update, cancelled || (s.reqDone && s.respDone)
}
|
||||
|
||||
func (s *httpStream) parseRequestLine() utils.LSMAction {
|
||||
// Find the end of the request line
|
||||
line, ok := s.reqBuf.GetUntil([]byte("\r\n"), true, true)
|
||||
if !ok {
|
||||
// No end of line yet, but maybe we just need more data
|
||||
return utils.LSMActionPause
|
||||
}
|
||||
fields := strings.Fields(string(line[:len(line)-2])) // Strip \r\n
|
||||
if len(fields) != 3 {
|
||||
// Invalid request line
|
||||
return utils.LSMActionCancel
|
||||
}
|
||||
method := fields[0]
|
||||
path := fields[1]
|
||||
version := fields[2]
|
||||
if !strings.HasPrefix(version, "HTTP/") {
|
||||
// Invalid version
|
||||
return utils.LSMActionCancel
|
||||
}
|
||||
s.reqMap = analyzer.PropMap{
|
||||
"method": method,
|
||||
"path": path,
|
||||
"version": version,
|
||||
}
|
||||
s.reqUpdated = true
|
||||
return utils.LSMActionNext
|
||||
}
|
||||
|
||||
func (s *httpStream) parseResponseLine() utils.LSMAction {
|
||||
// Find the end of the response line
|
||||
line, ok := s.respBuf.GetUntil([]byte("\r\n"), true, true)
|
||||
if !ok {
|
||||
// No end of line yet, but maybe we just need more data
|
||||
return utils.LSMActionPause
|
||||
}
|
||||
fields := strings.Fields(string(line[:len(line)-2])) // Strip \r\n
|
||||
if len(fields) < 2 {
|
||||
// Invalid response line
|
||||
return utils.LSMActionCancel
|
||||
}
|
||||
version := fields[0]
|
||||
status, _ := strconv.Atoi(fields[1])
|
||||
if !strings.HasPrefix(version, "HTTP/") || status == 0 {
|
||||
// Invalid version
|
||||
return utils.LSMActionCancel
|
||||
}
|
||||
s.respMap = analyzer.PropMap{
|
||||
"version": version,
|
||||
"status": status,
|
||||
}
|
||||
s.respUpdated = true
|
||||
return utils.LSMActionNext
|
||||
}
|
||||
|
||||
func (s *httpStream) parseHeaders(buf *utils.ByteBuffer) (utils.LSMAction, analyzer.PropMap) {
|
||||
// Find the end of headers
|
||||
headers, ok := buf.GetUntil([]byte("\r\n\r\n"), true, true)
|
||||
if !ok {
|
||||
// No end of headers yet, but maybe we just need more data
|
||||
return utils.LSMActionPause, nil
|
||||
}
|
||||
headers = headers[:len(headers)-4] // Strip \r\n\r\n
|
||||
headerMap := make(analyzer.PropMap)
|
||||
for _, line := range bytes.Split(headers, []byte("\r\n")) {
|
||||
fields := bytes.SplitN(line, []byte(":"), 2)
|
||||
if len(fields) != 2 {
|
||||
// Invalid header
|
||||
return utils.LSMActionCancel, nil
|
||||
}
|
||||
key := string(bytes.TrimSpace(fields[0]))
|
||||
value := string(bytes.TrimSpace(fields[1]))
|
||||
// Normalize header keys to lowercase
|
||||
headerMap[strings.ToLower(key)] = value
|
||||
}
|
||||
return utils.LSMActionNext, headerMap
|
||||
}
|
||||
|
||||
func (s *httpStream) parseRequestHeaders() utils.LSMAction {
|
||||
action, headerMap := s.parseHeaders(s.reqBuf)
|
||||
if action == utils.LSMActionNext {
|
||||
s.reqMap["headers"] = headerMap
|
||||
s.reqUpdated = true
|
||||
}
|
||||
return action
|
||||
}
|
||||
|
||||
func (s *httpStream) parseResponseHeaders() utils.LSMAction {
|
||||
action, headerMap := s.parseHeaders(s.respBuf)
|
||||
if action == utils.LSMActionNext {
|
||||
s.respMap["headers"] = headerMap
|
||||
s.respUpdated = true
|
||||
}
|
||||
return action
|
||||
}
|
||||
|
||||
func (s *httpStream) Close(limited bool) *analyzer.PropUpdate {
|
||||
s.reqBuf.Reset()
|
||||
s.respBuf.Reset()
|
||||
s.reqMap = nil
|
||||
s.respMap = nil
|
||||
return nil
|
||||
}
|
||||
147
analyzer/tcp/ssh.go
Normal file
147
analyzer/tcp/ssh.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package tcp
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/apernet/OpenGFW/analyzer"
|
||||
"github.com/apernet/OpenGFW/analyzer/utils"
|
||||
)
|
||||
|
||||
var _ analyzer.TCPAnalyzer = (*SSHAnalyzer)(nil)
|
||||
|
||||
type SSHAnalyzer struct{}
|
||||
|
||||
func (a *SSHAnalyzer) Name() string {
|
||||
return "ssh"
|
||||
}
|
||||
|
||||
func (a *SSHAnalyzer) Limit() int {
|
||||
return 1024
|
||||
}
|
||||
|
||||
func (a *SSHAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) analyzer.TCPStream {
|
||||
return newSSHStream(logger)
|
||||
}
|
||||
|
||||
type sshStream struct {
|
||||
logger analyzer.Logger
|
||||
|
||||
clientBuf *utils.ByteBuffer
|
||||
clientMap analyzer.PropMap
|
||||
clientUpdated bool
|
||||
clientLSM *utils.LinearStateMachine
|
||||
clientDone bool
|
||||
|
||||
serverBuf *utils.ByteBuffer
|
||||
serverMap analyzer.PropMap
|
||||
serverUpdated bool
|
||||
serverLSM *utils.LinearStateMachine
|
||||
serverDone bool
|
||||
}
|
||||
|
||||
func newSSHStream(logger analyzer.Logger) *sshStream {
|
||||
s := &sshStream{logger: logger, clientBuf: &utils.ByteBuffer{}, serverBuf: &utils.ByteBuffer{}}
|
||||
s.clientLSM = utils.NewLinearStateMachine(
|
||||
s.parseClientExchangeLine,
|
||||
)
|
||||
s.serverLSM = utils.NewLinearStateMachine(
|
||||
s.parseServerExchangeLine,
|
||||
)
|
||||
return s
|
||||
}
|
||||
|
||||
// Feed appends newly arrived bytes to the buffer of the matching direction
// (rev selects the server side), advances that direction's state machine,
// and emits a merge update when a version exchange line was parsed. The
// stream is done when a parser cancels (not SSH) or both sides finished.
func (s *sshStream) Feed(rev, start, end bool, skip int, data []byte) (u *analyzer.PropUpdate, done bool) {
	if skip != 0 {
		// A gap in the stream makes further parsing impossible.
		return nil, true
	}
	if len(data) == 0 {
		return nil, false
	}
	var update *analyzer.PropUpdate
	var cancelled bool
	if rev {
		s.serverBuf.Append(data)
		// Clear the flag so we only report changes made by this Run.
		s.serverUpdated = false
		cancelled, s.serverDone = s.serverLSM.Run()
		if s.serverUpdated {
			update = &analyzer.PropUpdate{
				Type: analyzer.PropUpdateMerge,
				M:    analyzer.PropMap{"server": s.serverMap},
			}
			s.serverUpdated = false
		}
	} else {
		s.clientBuf.Append(data)
		// Clear the flag so we only report changes made by this Run.
		s.clientUpdated = false
		cancelled, s.clientDone = s.clientLSM.Run()
		if s.clientUpdated {
			update = &analyzer.PropUpdate{
				Type: analyzer.PropUpdateMerge,
				M:    analyzer.PropMap{"client": s.clientMap},
			}
			s.clientUpdated = false
		}
	}
	return update, cancelled || (s.clientDone && s.serverDone)
}
|
||||
|
||||
// parseExchangeLine parses the SSH Protocol Version Exchange string.
|
||||
// See RFC 4253, section 4.2.
|
||||
// "SSH-protoversion-softwareversion SP comments CR LF"
|
||||
// The "comments" part (along with the SP) is optional.
|
||||
func (s *sshStream) parseExchangeLine(buf *utils.ByteBuffer) (utils.LSMAction, analyzer.PropMap) {
|
||||
// Find the end of the line
|
||||
line, ok := buf.GetUntil([]byte("\r\n"), true, true)
|
||||
if !ok {
|
||||
// No end of line yet, but maybe we just need more data
|
||||
return utils.LSMActionPause, nil
|
||||
}
|
||||
if !strings.HasPrefix(string(line), "SSH-") {
|
||||
// Not SSH
|
||||
return utils.LSMActionCancel, nil
|
||||
}
|
||||
fields := strings.Fields(string(line[:len(line)-2])) // Strip \r\n
|
||||
if len(fields) < 1 || len(fields) > 2 {
|
||||
// Invalid line
|
||||
return utils.LSMActionCancel, nil
|
||||
}
|
||||
sshFields := strings.SplitN(fields[0], "-", 3)
|
||||
if len(sshFields) != 3 {
|
||||
// Invalid SSH version format
|
||||
return utils.LSMActionCancel, nil
|
||||
}
|
||||
sMap := analyzer.PropMap{
|
||||
"protocol": sshFields[1],
|
||||
"software": sshFields[2],
|
||||
}
|
||||
if len(fields) == 2 {
|
||||
sMap["comments"] = fields[1]
|
||||
}
|
||||
return utils.LSMActionNext, sMap
|
||||
}
|
||||
|
||||
func (s *sshStream) parseClientExchangeLine() utils.LSMAction {
|
||||
action, sMap := s.parseExchangeLine(s.clientBuf)
|
||||
if action == utils.LSMActionNext {
|
||||
s.clientMap = sMap
|
||||
s.clientUpdated = true
|
||||
}
|
||||
return action
|
||||
}
|
||||
|
||||
func (s *sshStream) parseServerExchangeLine() utils.LSMAction {
|
||||
action, sMap := s.parseExchangeLine(s.serverBuf)
|
||||
if action == utils.LSMActionNext {
|
||||
s.serverMap = sMap
|
||||
s.serverUpdated = true
|
||||
}
|
||||
return action
|
||||
}
|
||||
|
||||
func (s *sshStream) Close(limited bool) *analyzer.PropUpdate {
|
||||
s.clientBuf.Reset()
|
||||
s.serverBuf.Reset()
|
||||
s.clientMap = nil
|
||||
s.serverMap = nil
|
||||
return nil
|
||||
}
|
||||
354
analyzer/tcp/tls.go
Normal file
354
analyzer/tcp/tls.go
Normal file
@@ -0,0 +1,354 @@
|
||||
package tcp
|
||||
|
||||
import (
|
||||
"github.com/apernet/OpenGFW/analyzer"
|
||||
"github.com/apernet/OpenGFW/analyzer/utils"
|
||||
)
|
||||
|
||||
var _ analyzer.TCPAnalyzer = (*TLSAnalyzer)(nil)
|
||||
|
||||
type TLSAnalyzer struct{}
|
||||
|
||||
func (a *TLSAnalyzer) Name() string {
|
||||
return "tls"
|
||||
}
|
||||
|
||||
func (a *TLSAnalyzer) Limit() int {
|
||||
return 8192
|
||||
}
|
||||
|
||||
func (a *TLSAnalyzer) NewTCP(info analyzer.TCPInfo, logger analyzer.Logger) analyzer.TCPStream {
|
||||
return newTLSStream(logger)
|
||||
}
|
||||
|
||||
type tlsStream struct {
|
||||
logger analyzer.Logger
|
||||
|
||||
reqBuf *utils.ByteBuffer
|
||||
reqMap analyzer.PropMap
|
||||
reqUpdated bool
|
||||
reqLSM *utils.LinearStateMachine
|
||||
reqDone bool
|
||||
|
||||
respBuf *utils.ByteBuffer
|
||||
respMap analyzer.PropMap
|
||||
respUpdated bool
|
||||
respLSM *utils.LinearStateMachine
|
||||
respDone bool
|
||||
|
||||
clientHelloLen int
|
||||
serverHelloLen int
|
||||
}
|
||||
|
||||
func newTLSStream(logger analyzer.Logger) *tlsStream {
|
||||
s := &tlsStream{logger: logger, reqBuf: &utils.ByteBuffer{}, respBuf: &utils.ByteBuffer{}}
|
||||
s.reqLSM = utils.NewLinearStateMachine(
|
||||
s.tlsClientHelloSanityCheck,
|
||||
s.parseClientHello,
|
||||
)
|
||||
s.respLSM = utils.NewLinearStateMachine(
|
||||
s.tlsServerHelloSanityCheck,
|
||||
s.parseServerHello,
|
||||
)
|
||||
return s
|
||||
}
|
||||
|
||||
// Feed appends newly arrived bytes to the buffer of the matching direction
// (rev selects the server side), advances that direction's state machine,
// and emits a merge update when hello properties were parsed. The stream
// is done when a parser cancels (not TLS) or both directions finished.
func (s *tlsStream) Feed(rev, start, end bool, skip int, data []byte) (u *analyzer.PropUpdate, done bool) {
	if skip != 0 {
		// A gap in the stream makes further parsing impossible.
		return nil, true
	}
	if len(data) == 0 {
		return nil, false
	}
	var update *analyzer.PropUpdate
	var cancelled bool
	if rev {
		s.respBuf.Append(data)
		// Clear the flag so we only report changes made by this Run.
		s.respUpdated = false
		cancelled, s.respDone = s.respLSM.Run()
		if s.respUpdated {
			update = &analyzer.PropUpdate{
				Type: analyzer.PropUpdateMerge,
				M:    analyzer.PropMap{"resp": s.respMap},
			}
			s.respUpdated = false
		}
	} else {
		s.reqBuf.Append(data)
		// Clear the flag so we only report changes made by this Run.
		s.reqUpdated = false
		cancelled, s.reqDone = s.reqLSM.Run()
		if s.reqUpdated {
			update = &analyzer.PropUpdate{
				Type: analyzer.PropUpdateMerge,
				M:    analyzer.PropMap{"req": s.reqMap},
			}
			s.reqUpdated = false
		}
	}
	return update, cancelled || (s.reqDone && s.respDone)
}
|
||||
|
||||
// tlsClientHelloSanityCheck peeks at the first 9 bytes (5-byte record
// header + 4-byte handshake header), verifies they describe a handshake
// record carrying a ClientHello, and records its handshake length.
func (s *tlsStream) tlsClientHelloSanityCheck() utils.LSMAction {
	data, ok := s.reqBuf.Get(9, true)
	if !ok {
		// Need at least 9 bytes before we can judge anything.
		return utils.LSMActionPause
	}
	if data[0] != 0x16 || data[5] != 0x01 {
		// Not a TLS handshake, or not a client hello
		return utils.LSMActionCancel
	}
	// 24-bit big-endian handshake message length.
	s.clientHelloLen = int(data[6])<<16 | int(data[7])<<8 | int(data[8])
	if s.clientHelloLen < 41 {
		// 2 (Protocol Version) +
		// 32 (Random) +
		// 1 (Session ID Length) +
		// 2 (Cipher Suites Length) +
		// 2 (Cipher Suite) +
		// 1 (Compression Methods Length) +
		// 1 (Compression Method) +
		// No extensions
		// This should be the bare minimum for a client hello
		return utils.LSMActionCancel
	}
	return utils.LSMActionNext
}
|
||||
|
||||
// tlsServerHelloSanityCheck peeks at the first 9 bytes (5-byte record
// header + 4-byte handshake header), verifies they describe a handshake
// record carrying a ServerHello, and records its handshake length.
func (s *tlsStream) tlsServerHelloSanityCheck() utils.LSMAction {
	data, ok := s.respBuf.Get(9, true)
	if !ok {
		// Need at least 9 bytes before we can judge anything.
		return utils.LSMActionPause
	}
	if data[0] != 0x16 || data[5] != 0x02 {
		// Not a TLS handshake, or not a server hello
		return utils.LSMActionCancel
	}
	// 24-bit big-endian handshake message length.
	s.serverHelloLen = int(data[6])<<16 | int(data[7])<<8 | int(data[8])
	if s.serverHelloLen < 38 {
		// 2 (Protocol Version) +
		// 32 (Random) +
		// 1 (Session ID Length) +
		// 2 (Cipher Suite) +
		// 1 (Compression Method) +
		// No extensions
		// This should be the bare minimum for a server hello
		return utils.LSMActionCancel
	}
	return utils.LSMActionNext
}
|
||||
|
||||
// parseClientHello consumes a complete ClientHello handshake message and
// fills reqMap with version, random, session ID, cipher suites,
// compression methods and any recognized extensions.
func (s *tlsStream) parseClientHello() utils.LSMAction {
	chBuf, ok := s.reqBuf.GetSubBuffer(s.clientHelloLen, true)
	if !ok {
		// Not a full client hello yet
		return utils.LSMActionPause
	}
	s.reqUpdated = true
	s.reqMap = make(analyzer.PropMap)
	// Version, random & session ID length combined are within 35 bytes,
	// so no need for bounds checking
	s.reqMap["version"], _ = chBuf.GetUint16(false, true)
	s.reqMap["random"], _ = chBuf.Get(32, true)
	sessionIDLen, _ := chBuf.GetByte(true)
	s.reqMap["session"], ok = chBuf.Get(int(sessionIDLen), true)
	if !ok {
		// Not enough data for session ID
		return utils.LSMActionCancel
	}
	cipherSuitesLen, ok := chBuf.GetUint16(false, true)
	if !ok {
		// Not enough data for cipher suites length
		return utils.LSMActionCancel
	}
	if cipherSuitesLen%2 != 0 {
		// Cipher suites are 2 bytes each, so must be even
		return utils.LSMActionCancel
	}
	ciphers := make([]uint16, cipherSuitesLen/2)
	for i := range ciphers {
		ciphers[i], ok = chBuf.GetUint16(false, true)
		if !ok {
			// Not enough data for a cipher suite entry
			return utils.LSMActionCancel
		}
	}
	s.reqMap["ciphers"] = ciphers
	compressionMethodsLen, ok := chBuf.GetByte(true)
	if !ok {
		// Not enough data for compression methods length
		return utils.LSMActionCancel
	}
	// Compression methods are 1 byte each, we just put a byte slice here
	s.reqMap["compression"], ok = chBuf.Get(int(compressionMethodsLen), true)
	if !ok {
		// Not enough data for compression methods
		return utils.LSMActionCancel
	}
	extsLen, ok := chBuf.GetUint16(false, true)
	if !ok {
		// No extensions, I guess it's possible?
		return utils.LSMActionNext
	}
	extBuf, ok := chBuf.GetSubBuffer(int(extsLen), true)
	if !ok {
		// Not enough data for extensions
		return utils.LSMActionCancel
	}
	// Each extension: 2-byte type, 2-byte length, then payload.
	for extBuf.Len() > 0 {
		extType, ok := extBuf.GetUint16(false, true)
		if !ok {
			// Not enough data for extension type
			return utils.LSMActionCancel
		}
		extLen, ok := extBuf.GetUint16(false, true)
		if !ok {
			// Not enough data for extension length
			return utils.LSMActionCancel
		}
		extDataBuf, ok := extBuf.GetSubBuffer(int(extLen), true)
		if !ok || !s.handleExtensions(extType, extDataBuf, s.reqMap) {
			// Not enough data for extension data, or invalid extension
			return utils.LSMActionCancel
		}
	}
	return utils.LSMActionNext
}
|
||||
|
||||
// parseServerHello consumes a complete ServerHello handshake message and
// fills respMap with version, random, session ID, the selected cipher
// suite and compression method, and any recognized extensions.
func (s *tlsStream) parseServerHello() utils.LSMAction {
	shBuf, ok := s.respBuf.GetSubBuffer(s.serverHelloLen, true)
	if !ok {
		// Not a full server hello yet
		return utils.LSMActionPause
	}
	s.respUpdated = true
	s.respMap = make(analyzer.PropMap)
	// Version, random & session ID length combined are within 35 bytes,
	// so no need for bounds checking
	s.respMap["version"], _ = shBuf.GetUint16(false, true)
	s.respMap["random"], _ = shBuf.Get(32, true)
	sessionIDLen, _ := shBuf.GetByte(true)
	s.respMap["session"], ok = shBuf.Get(int(sessionIDLen), true)
	if !ok {
		// Not enough data for session ID
		return utils.LSMActionCancel
	}
	cipherSuite, ok := shBuf.GetUint16(false, true)
	if !ok {
		// Not enough data for cipher suite
		return utils.LSMActionCancel
	}
	s.respMap["cipher"] = cipherSuite
	compressionMethod, ok := shBuf.GetByte(true)
	if !ok {
		// Not enough data for compression method
		return utils.LSMActionCancel
	}
	s.respMap["compression"] = compressionMethod
	extsLen, ok := shBuf.GetUint16(false, true)
	if !ok {
		// No extensions, I guess it's possible?
		return utils.LSMActionNext
	}
	extBuf, ok := shBuf.GetSubBuffer(int(extsLen), true)
	if !ok {
		// Not enough data for extensions
		return utils.LSMActionCancel
	}
	// Each extension: 2-byte type, 2-byte length, then payload.
	for extBuf.Len() > 0 {
		extType, ok := extBuf.GetUint16(false, true)
		if !ok {
			// Not enough data for extension type
			return utils.LSMActionCancel
		}
		extLen, ok := extBuf.GetUint16(false, true)
		if !ok {
			// Not enough data for extension length
			return utils.LSMActionCancel
		}
		extDataBuf, ok := extBuf.GetSubBuffer(int(extLen), true)
		if !ok || !s.handleExtensions(extType, extDataBuf, s.respMap) {
			// Not enough data for extension data, or invalid extension
			return utils.LSMActionCancel
		}
	}
	return utils.LSMActionNext
}
|
||||
|
||||
// handleExtensions parses a single TLS extension payload into m.
// Recognized extensions: SNI (0x0000), ALPN (0x0010), Supported Versions
// (0x002b) and ECH (0xfe0d); unrecognized extension types are ignored.
// It returns false only when a recognized extension is malformed.
func (s *tlsStream) handleExtensions(extType uint16, extDataBuf *utils.ByteBuffer, m analyzer.PropMap) bool {
	switch extType {
	case 0x0000: // SNI
		ok := extDataBuf.Skip(2) // Ignore list length, we only care about the first entry for now
		if !ok {
			// Not enough data for list length
			return false
		}
		sniType, ok := extDataBuf.GetByte(true)
		if !ok || sniType != 0 {
			// Not enough data for SNI type, or not hostname
			return false
		}
		sniLen, ok := extDataBuf.GetUint16(false, true)
		if !ok {
			// Not enough data for SNI length
			return false
		}
		m["sni"], ok = extDataBuf.GetString(int(sniLen), true)
		if !ok {
			// Not enough data for SNI
			return false
		}
	case 0x0010: // ALPN
		ok := extDataBuf.Skip(2) // Ignore list length, as we read until the end
		if !ok {
			// Not enough data for list length
			return false
		}
		var alpnList []string
		// Each entry: 1-byte length followed by the protocol name.
		for extDataBuf.Len() > 0 {
			alpnLen, ok := extDataBuf.GetByte(true)
			if !ok {
				// Not enough data for ALPN length
				return false
			}
			alpn, ok := extDataBuf.GetString(int(alpnLen), true)
			if !ok {
				// Not enough data for ALPN
				return false
			}
			alpnList = append(alpnList, alpn)
		}
		m["alpn"] = alpnList
	case 0x002b: // Supported Versions
		if extDataBuf.Len() == 2 {
			// Server only selects one version
			m["supported_versions"], _ = extDataBuf.GetUint16(false, true)
		} else {
			// Client sends a list of versions
			ok := extDataBuf.Skip(1) // Ignore list length, as we read until the end
			if !ok {
				// Not enough data for list length
				return false
			}
			var versions []uint16
			for extDataBuf.Len() > 0 {
				ver, ok := extDataBuf.GetUint16(false, true)
				if !ok {
					// Not enough data for version
					return false
				}
				versions = append(versions, ver)
			}
			m["supported_versions"] = versions
		}
	case 0xfe0d: // ECH
		// We can't parse ECH for now, just set a flag
		m["ech"] = true
	}
	return true
}
|
||||
|
||||
func (s *tlsStream) Close(limited bool) *analyzer.PropUpdate {
|
||||
s.reqBuf.Reset()
|
||||
s.respBuf.Reset()
|
||||
s.reqMap = nil
|
||||
s.respMap = nil
|
||||
return nil
|
||||
}
|
||||
Reference in New Issue
Block a user