Compare commits

...

88 Commits

Author SHA1 Message Date
fatedier
134a46c00b Merge pull request #1369 from fatedier/dev
bump version to v0.28.2
2019-08-09 12:59:13 +08:00
fatedier
50796643fb Merge pull request #1368 from fatedier/new
fix health check bug, fix #1367
2019-08-09 12:52:46 +08:00
fatedier
b1838b1d5e bump version to v0.28.2 2019-08-09 12:50:33 +08:00
fatedier
757b3613fe fix health check bug, fix #1367 2019-08-09 12:47:27 +08:00
fatedier
ae08811636 Merge pull request #1364 from fatedier/dev
bump version to v0.28.1 and remove support for go1.11
2019-08-08 17:32:57 +08:00
fatedier
b657c0fe09 Merge pull request #1358 from fatedier/new
update vendor packages
2019-08-06 18:59:03 +08:00
fatedier
84df71047c no support for go1.11 2019-08-06 18:53:32 +08:00
fatedier
abc6d720d0 vendor update github.com/gorilla/websocket 2019-08-06 18:53:15 +08:00
fatedier
80154639e3 fix 2019-08-06 17:29:35 +08:00
fatedier
f2117d8331 bump version to v0.28.1 2019-08-06 16:51:55 +08:00
fatedier
261be6a7b7 add vendor files 2019-08-06 16:50:54 +08:00
fatedier
b53a2c1ed9 update reverseproxy from std libraries 2019-08-06 16:49:22 +08:00
fatedier
ee0df07a3c vendor update 2019-08-03 23:23:00 +08:00
fatedier
4e363eca2b update version of github.com/gorilla/mux 2019-08-03 23:22:22 +08:00
fatedier
4277405c0e update vendors 2019-08-03 18:49:55 +08:00
fatedier
6a99f0caf7 update testify and kcp-go 2019-08-03 18:44:11 +08:00
fatedier
394af08561 close session in login() 2019-08-03 16:43:21 +08:00
fatedier
6451583e60 Merge pull request #1349 from fatedier/dev
bump version to v0.28.0
2019-08-01 14:04:55 +08:00
fatedier
30cb0a3ab0 Merge pull request #1344 from fatedier/new
support http load balancing
2019-08-01 13:59:41 +08:00
fatedier
5680a88267 fix connection leak when login_fail_exit is false, fix #1335 2019-07-31 00:50:38 +08:00
fatedier
6b089858db bump version to v0.28.0 2019-07-31 00:47:50 +08:00
fatedier
b3ed863021 support http load balancing 2019-07-31 00:41:58 +08:00
fatedier
5796c27ed5 doc: update 2019-07-31 00:41:43 +08:00
fatedier
310e8dd768 Merge pull request #1331 from muesli/typo-fixes
Fixed typos in comments
2019-07-19 18:50:29 +08:00
Christian Muehlhaeuser
0b40ac2dbc Fixed typos in comments
Just nitpicky typo fixes.
2019-07-19 12:40:14 +02:00
fatedier
f22c8e0882 Merge pull request #1323 from skyrocknroll/skyrocknroll-patch-1
Typo
2019-07-15 14:03:57 +08:00
Yuvaraj L
a388bb2c95 Typo
English Grammar Typo
2019-07-15 01:05:43 +05:30
fatedier
e611c44dea Merge pull request #1322 from fatedier/dev
bump version to v0.27.1
2019-07-14 20:00:31 +08:00
fatedier
8e36e2bb67 Merge pull request #1320 from fatedier/new
add read timeout for TLS check operation
2019-07-14 10:57:22 +08:00
fatedier
541ad8d899 update ISSUE_TEMPLATE 2019-07-12 17:59:45 +08:00
fatedier
17cc0735d1 add read timeout for TLS check operation 2019-07-12 17:11:03 +08:00
fatedier
fd336a5503 Merge pull request #1275 from Arugal/dev
replace the _
2019-06-02 21:22:29 +08:00
zhangwei
802d1c1861 replace the _ 2019-06-01 10:09:13 +08:00
fatedier
65fe0a1179 Merge pull request #1271 from jiajunhuang/resp_body_should_be_closed
resp.Body must be closed after function return
2019-06-01 00:44:03 +08:00
Jiajun Huang
2d24879fa3 fix 2019-05-31 15:56:05 +08:00
Jiajun Huang
75383a95b3 resp.Body must be closed after function return
whether it's success or fail, otherwise it will cause memory leak
ref: https://golang.org/pkg/net/http/
2019-05-30 22:32:36 +08:00
fatedier
95444ea46b Merge pull request #1216 from fatedier/dev
bump version to v0.27.0
2019-04-25 14:41:05 +08:00
fatedier
9f9c01b520 Merge pull request #1215 from fatedier/new
merge new features
2019-04-25 14:38:05 +08:00
fatedier
285d1eba0d bump version to v0.27.0 2019-04-25 12:31:20 +08:00
fatedier
0dfd3a421c frps: support custom_404_page 2019-04-25 12:29:34 +08:00
fatedier
6a1f15b25e support proxy protocol in unix_domain_socket 2019-04-25 12:01:57 +08:00
Gihan
9f47c324b7 api error fix due to status code 2019-04-25 09:54:56 +08:00
fatedier
f0df6084af Merge pull request #1206 from bgkavinga/master
api error fix due to status code
2019-04-24 12:06:13 +08:00
Gihan
879ca47590 api error fix due to status code 2019-04-21 13:29:35 +05:30
fatedier
6a7efc81c9 Merge pull request #1191 from fatedier/dev
Bump version to v0.26.0
2019-04-10 14:02:03 +08:00
fatedier
12c5c553c3 Merge pull request #1190 from fatedier/new
new features
2019-04-10 13:57:59 +08:00
fatedier
988e9b1de3 update doc 2019-04-10 13:51:05 +08:00
fatedier
db6bbc5187 frpc: new plugin https2http 2019-04-10 12:02:22 +08:00
fatedier
c67b4e7b94 vendor: add packages 2019-04-10 10:53:45 +08:00
fatedier
b7a73d3469 support proxy protocol for type http 2019-04-10 10:51:01 +08:00
fatedier
7f9d88c10a fix 2019-04-08 15:39:14 +08:00
fatedier
79237d2b94 bump version to v0.26.0 2019-03-29 19:40:25 +08:00
fatedier
9c4ec56491 support proxy protocol 2019-03-29 19:01:18 +08:00
fatedier
74a8752570 fix route conflict 2019-03-29 17:12:44 +08:00
fatedier
a8ab4c5003 Merge pull request #1160 from fatedier/dev
bump version to v0.25.3, fix #1159
2019-03-26 19:33:39 +08:00
fatedier
9cee263c91 fix panic error when reconnecting using tls 2019-03-26 19:28:24 +08:00
fatedier
c6bf6f59e6 update package.sh 2019-03-25 18:38:02 +08:00
fatedier
4b7aef2196 Merge pull request #1157 from fatedier/dev
bump version to v0.25.2
2019-03-25 18:26:33 +08:00
fatedier
f6d0046b5a bump version to v0.25.2 2019-03-25 18:22:35 +08:00
fatedier
84363266d2 Merge pull request #1156 from fatedier/new
fix health check unclosed resp body
2019-03-25 18:22:15 +08:00
fatedier
9ac8f2a047 fix health check unclosed resp body, fix #1155 2019-03-25 18:17:33 +08:00
fatedier
b2b55533b8 Merge pull request #1147 from a-wing/dev
Add systemd unit
2019-03-21 17:46:36 +08:00
a-wing
a4cfab689a Add systemd unit
Ref https://github.com/fatedier/frp/issues/1058
Ref https://aur.archlinux.org/packages/frp/

Co-authored-by: vimsucks <dev@vimsucks.com>
2019-03-21 11:38:34 +08:00
fatedier
c7df39074c Merge pull request #1140 from fatedier/kcp
update kcp-go package
2019-03-17 17:15:44 +08:00
fatedier
fdcdccb0c2 update kcp-go package 2019-03-17 17:09:54 +08:00
fatedier
e945c1667a Merge pull request #1138 from fatedier/dev
bump version to v0.25.1
2019-03-15 17:05:09 +08:00
fatedier
87a4de4370 Merge pull request #1137 from fatedier/fix
some fixes
2019-03-15 17:00:59 +08:00
fatedier
e1e2913b77 bump version to v0.25.1 2019-03-15 16:46:22 +08:00
fatedier
9be24db410 support multilevel subdomain, fix #1132 2019-03-15 16:22:41 +08:00
fatedier
6b61cb3742 fix frps --log_file useless, fix #1125 2019-03-15 15:37:17 +08:00
fatedier
90b7f2080f Merge pull request #1122 from fatedier/dev
bump version to v0.25.0
2019-03-11 17:40:39 +08:00
fatedier
d1f1c72a55 update ci 2019-03-11 17:11:26 +08:00
fatedier
1925847ef8 update doc 2019-03-11 16:24:54 +08:00
fatedier
8b216b0ca9 Merge pull request #1121 from fatedier/new
new feature
2019-03-11 16:05:18 +08:00
fatedier
dbfeea99f3 update .travis.yml, support go1.12 2019-03-11 16:02:45 +08:00
fatedier
5e64bbfa7c vendor: update package 2019-03-11 15:54:55 +08:00
fatedier
e691a40260 improve the stability of xtcp 2019-03-11 15:53:58 +08:00
fatedier
d812488767 support tls connection 2019-03-11 14:14:31 +08:00
fatedier
3c03690ab7 Merge pull request #1112 from fatedier/p2p
xtcp: wrap yamux on kcp connections, fix #1103
2019-03-05 11:27:15 +08:00
fatedier
3df27b9c04 xtcp: wrap yamux on kcp connections 2019-03-05 11:18:17 +08:00
fatedier
ba45d29b7c fix xtcp cmd 2019-03-03 23:44:44 +08:00
fatedier
3cf83f57a8 update yamux version 2019-03-03 22:29:08 +08:00
fatedier
03e4318d79 Merge pull request #1107 from likev/patch-1
Update instruction of 'Rewriting the Host Header'
2019-03-03 21:57:24 +08:00
xufanglu
178d134f46 Update instruction of 'Rewriting the Host Header'
Update instruction of 'Rewriting the Host Header' in README.md
2019-03-02 21:33:23 +08:00
fatedier
cbf9c731a0 Merge pull request #1088 from fatedier/dev
bump version to v0.24.1
2019-02-12 15:10:43 +08:00
fatedier
de4bfcc43c bump version to v0.24.1 2019-02-12 15:03:40 +08:00
fatedier
9737978f28 Merge pull request #1087 from fatedier/fix
fix PUT /api/config without token
2019-02-12 15:03:00 +08:00
fatedier
5bc7fe2cea fix PUT /api/config without token 2019-02-12 14:59:30 +08:00
715 changed files with 285048 additions and 6964 deletions

View File

@@ -1,5 +1,7 @@
Issue is only used for submitting bug reports and documentation typos. If there are same issues or answers can be found in documents, we will close it directly. Issue is only used for submitting bug reports and documentation typos. If there are same issues or answers can be found in documents, we will close it directly.
(为了节约时间,提高处理问题的效率,不按照格式填写的 issue 将会直接关闭。) (为了节约时间,提高处理问题的效率,不按照格式填写的 issue 将会直接关闭。)
(请不要在 issue 评论中出现无意义的 **加1****我也是** 等内容,将会被直接删除。)
(由于个人精力有限,和系统环境,网络环境等相关的求助问题请转至其他论坛或社交平台。)
Use the commands below to provide key information from your environment: Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST You do NOT have to include this information if this is a FEATURE REQUEST

View File

@@ -2,8 +2,7 @@ sudo: false
language: go language: go
go: go:
- 1.10.x - 1.12.x
- 1.11.x
install: install:
- make - make

View File

@@ -22,14 +22,17 @@ Now it also try to support p2p connect.
* [Forward DNS query request](#forward-dns-query-request) * [Forward DNS query request](#forward-dns-query-request)
* [Forward unix domain socket](#forward-unix-domain-socket) * [Forward unix domain socket](#forward-unix-domain-socket)
* [Expose a simple http file server](#expose-a-simple-http-file-server) * [Expose a simple http file server](#expose-a-simple-http-file-server)
* [Enable HTTPS for local HTTP service](#enable-https-for-local-http-service)
* [Expose your service in security](#expose-your-service-in-security) * [Expose your service in security](#expose-your-service-in-security)
* [P2P Mode](#p2p-mode) * [P2P Mode](#p2p-mode)
* [Features](#features) * [Features](#features)
* [Configuration File](#configuration-file) * [Configuration File](#configuration-file)
* [Configuration file template](#configuration-file-template) * [Configuration file template](#configuration-file-template)
* [Dashboard](#dashboard) * [Dashboard](#dashboard)
* [Admin UI](#admin-ui)
* [Authentication](#authentication) * [Authentication](#authentication)
* [Encryption and Compression](#encryption-and-compression) * [Encryption and Compression](#encryption-and-compression)
* [TLS](#tls)
* [Hot-Reload frpc configuration](#hot-reload-frpc-configuration) * [Hot-Reload frpc configuration](#hot-reload-frpc-configuration)
* [Get proxy status from client](#get-proxy-status-from-client) * [Get proxy status from client](#get-proxy-status-from-client)
* [Port White List](#port-white-list) * [Port White List](#port-white-list)
@@ -42,6 +45,8 @@ Now it also try to support p2p connect.
* [Rewriting the Host Header](#rewriting-the-host-header) * [Rewriting the Host Header](#rewriting-the-host-header)
* [Set Headers In HTTP Request](#set-headers-in-http-request) * [Set Headers In HTTP Request](#set-headers-in-http-request)
* [Get Real IP](#get-real-ip) * [Get Real IP](#get-real-ip)
* [HTTP X-Forwarded-For](#http-x-forwarded-for)
* [Proxy Protocol](#proxy-protocol)
* [Password protecting your web service](#password-protecting-your-web-service) * [Password protecting your web service](#password-protecting-your-web-service)
* [Custom subdomain names](#custom-subdomain-names) * [Custom subdomain names](#custom-subdomain-names)
* [URL routing](#url-routing) * [URL routing](#url-routing)
@@ -241,11 +246,34 @@ Configure frps same as above.
2. Visit `http://x.x.x.x:6000/static/` by your browser, set correct user and password, so you can see files in `/tmp/file`. 2. Visit `http://x.x.x.x:6000/static/` by your browser, set correct user and password, so you can see files in `/tmp/file`.
### Enable HTTPS for local HTTP service
1. Start frpc with configurations:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
[test_htts2http]
type = https
custom_domains = test.yourdomain.com
plugin = https2http
plugin_local_addr = 127.0.0.1:80
plugin_crt_path = ./server.crt
plugin_key_path = ./server.key
plugin_host_header_rewrite = 127.0.0.1
```
2. Visit `https://test.yourdomain.com`.
### Expose your service in security ### Expose your service in security
For some services, if expose them to the public network directly will be a security risk. For some services, if expose them to the public network directly will be a security risk.
**stcp(secret tcp)** help you create a proxy avoiding any one can access it. **stcp(secret tcp)** helps you create a proxy avoiding any one can access it.
Configure frps same as above. Configure frps same as above.
@@ -389,6 +417,22 @@ Then visit `http://[server_addr]:7500` to see dashboard, default username and pa
![dashboard](/doc/pic/dashboard.png) ![dashboard](/doc/pic/dashboard.png)
### Admin UI
Admin UI helps you check and manage frpc's configuration.
Configure a address for admin UI to enable this feature:
```ini
[common]
admin_addr = 127.0.0.1
admin_port = 7400
admin_user = admin
admin_pwd = admin
```
Then visit `http://127.0.0.1:7400` to see admin UI, default username and password are both `admin`.
### Authentication ### Authentication
`token` in frps.ini and frpc.ini should be same. `token` in frps.ini and frpc.ini should be same.
@@ -407,6 +451,14 @@ use_encryption = true
use_compression = true use_compression = true
``` ```
#### TLS
frp supports the TLS protocol between frpc and frps since v0.25.0.
Config `tls_enable = true` in the `common` section of frpc.ini to enable this feature.
For port multiplexing, frp sends a first byte 0x17 to dial a TLS connection.
### Hot-Reload frpc configuration ### Hot-Reload frpc configuration
First you need to set admin port in frpc's configure file to let it provide HTTP API for more features. First you need to set admin port in frpc's configure file to let it provide HTTP API for more features.
@@ -458,8 +510,6 @@ tcp_mux = false
### Support KCP Protocol ### Support KCP Protocol
frp support kcp protocol since v0.12.0.
KCP is a fast and reliable protocol that can achieve the transmission effect of a reduction of the average latency by 30% to 40% and reduction of the maximum delay by a factor of three, at the cost of 10% to 20% more bandwidth wasted than TCP. KCP is a fast and reliable protocol that can achieve the transmission effect of a reduction of the average latency by 30% to 40% and reduction of the maximum delay by a factor of three, at the cost of 10% to 20% more bandwidth wasted than TCP.
Using kcp in frp: Using kcp in frp:
@@ -510,7 +560,8 @@ This feature is fit for a large number of short connections.
### Load balancing ### Load balancing
Load balancing is supported by `group`. Load balancing is supported by `group`.
This feature is available only for type `tcp` now.
This feature is available only for type `tcp` and `http` now.
```ini ```ini
# frpc.ini # frpc.ini
@@ -533,6 +584,10 @@ group_key = 123
Proxies in same group will accept connections from port 80 randomly. Proxies in same group will accept connections from port 80 randomly.
For `tcp` type, `remote_port` in one group should be the same.
For `http` type, `custom_domains, subdomain, locations` should be the same.
### Health Check ### Health Check
Health check feature can help you achieve high availability with load balancing. Health check feature can help you achieve high availability with load balancing.
@@ -592,7 +647,7 @@ custom_domains = test.yourdomain.com
host_header_rewrite = dev.yourdomain.com host_header_rewrite = dev.yourdomain.com
``` ```
If `host_header_rewrite` is specified, the host header will be rewritten to match the hostname portion of the forwarding address. The `Host` request header will be rewritten to `Host: dev.yourdomain.com` before it reach your local http server.
### Set Headers In HTTP Request ### Set Headers In HTTP Request
@@ -613,9 +668,32 @@ In this example, it will set header `X-From-Where: frp` to http request.
### Get Real IP ### Get Real IP
#### HTTP X-Forwarded-For
Features for http proxy only. Features for http proxy only.
You can get user's real IP from http request header `X-Forwarded-For` and `X-Real-IP`. You can get user's real IP from HTTP request header `X-Forwarded-For` and `X-Real-IP`.
#### Proxy Protocol
frp supports Proxy Protocol to send user's real IP to the local service. It supports all types except UDP.
Here is an example for https service:
```ini
# frpc.ini
[web]
type = https
local_port = 443
custom_domains = test.yourdomain.com
# now v1 and v2 is supported
proxy_protocol_version = v2
```
You can enable Proxy Protocol support in nginx to parse user's real IP to http header `X-Real-IP`.
Then you can get it from HTTP request header in your local service.
### Password protecting your web service ### Password protecting your web service
@@ -736,8 +814,6 @@ plugin_http_passwd = abc
## Development Plan ## Development Plan
* Log http request information in frps. * Log http request information in frps.
* Direct reverse proxy, like haproxy.
* kubernetes ingress support.
## Contributing ## Contributing

View File

@@ -16,16 +16,19 @@ frp 是一个可用于内网穿透的高性能的反向代理应用,支持 tcp
* [通过 ssh 访问公司内网机器](#通过-ssh-访问公司内网机器) * [通过 ssh 访问公司内网机器](#通过-ssh-访问公司内网机器)
* [通过自定义域名访问部署于内网的 web 服务](#通过自定义域名访问部署于内网的-web-服务) * [通过自定义域名访问部署于内网的 web 服务](#通过自定义域名访问部署于内网的-web-服务)
* [转发 DNS 查询请求](#转发-dns-查询请求) * [转发 DNS 查询请求](#转发-dns-查询请求)
* [转发 Unix域套接字](#转发-unix域套接字) * [转发 Unix 域套接字](#转发-unix-域套接字)
* [对外提供简单的文件访问服务](#对外提供简单的文件访问服务) * [对外提供简单的文件访问服务](#对外提供简单的文件访问服务)
* [为本地 HTTP 服务启用 HTTPS](#为本地-http-服务启用-https)
* [安全地暴露内网服务](#安全地暴露内网服务) * [安全地暴露内网服务](#安全地暴露内网服务)
* [点对点内网穿透](#点对点内网穿透) * [点对点内网穿透](#点对点内网穿透)
* [功能说明](#功能说明) * [功能说明](#功能说明)
* [配置文件](#配置文件) * [配置文件](#配置文件)
* [配置文件模版渲染](#配置文件模版渲染) * [配置文件模版渲染](#配置文件模版渲染)
* [Dashboard](#dashboard) * [Dashboard](#dashboard)
* [Admin UI](#admin-ui)
* [身份验证](#身份验证) * [身份验证](#身份验证)
* [加密与压缩](#加密与压缩) * [加密与压缩](#加密与压缩)
* [TLS](#tls)
* [客户端热加载配置文件](#客户端热加载配置文件) * [客户端热加载配置文件](#客户端热加载配置文件)
* [客户端查看代理状态](#客户端查看代理状态) * [客户端查看代理状态](#客户端查看代理状态)
* [端口白名单](#端口白名单) * [端口白名单](#端口白名单)
@@ -38,6 +41,8 @@ frp 是一个可用于内网穿透的高性能的反向代理应用,支持 tcp
* [修改 Host Header](#修改-host-header) * [修改 Host Header](#修改-host-header)
* [设置 HTTP 请求的 header](#设置-http-请求的-header) * [设置 HTTP 请求的 header](#设置-http-请求的-header)
* [获取用户真实 IP](#获取用户真实-ip) * [获取用户真实 IP](#获取用户真实-ip)
* [HTTP X-Forwarded-For](#http-x-forwarded-for)
* [Proxy Protocol](#proxy-protocol)
* [通过密码保护你的 web 服务](#通过密码保护你的-web-服务) * [通过密码保护你的 web 服务](#通过密码保护你的-web-服务)
* [自定义二级域名](#自定义二级域名) * [自定义二级域名](#自定义二级域名)
* [URL 路由](#url-路由) * [URL 路由](#url-路由)
@@ -47,6 +52,7 @@ frp 是一个可用于内网穿透的高性能的反向代理应用,支持 tcp
* [开发计划](#开发计划) * [开发计划](#开发计划)
* [为 frp 做贡献](#为-frp-做贡献) * [为 frp 做贡献](#为-frp-做贡献)
* [捐助](#捐助) * [捐助](#捐助)
* [知识星球](#知识星球)
* [支付宝扫码捐赠](#支付宝扫码捐赠) * [支付宝扫码捐赠](#支付宝扫码捐赠)
* [微信支付捐赠](#微信支付捐赠) * [微信支付捐赠](#微信支付捐赠)
* [Paypal 捐赠](#paypal-捐赠) * [Paypal 捐赠](#paypal-捐赠)
@@ -188,7 +194,7 @@ DNS 查询请求通常使用 UDP 协议frp 支持对内网 UDP 服务的穿
`dig @x.x.x.x -p 6000 www.google.com` `dig @x.x.x.x -p 6000 www.google.com`
### 转发 Unix域套接字 ### 转发 Unix 域套接字
通过 tcp 端口访问内网的 unix域套接字(例如和 docker daemon 通信)。 通过 tcp 端口访问内网的 unix域套接字(例如和 docker daemon 通信)。
@@ -241,6 +247,33 @@ frps 的部署步骤同上。
2. 通过浏览器访问 `http://x.x.x.x:6000/static/` 来查看位于 `/tmp/file` 目录下的文件,会要求输入已设置好的用户名和密码。 2. 通过浏览器访问 `http://x.x.x.x:6000/static/` 来查看位于 `/tmp/file` 目录下的文件,会要求输入已设置好的用户名和密码。
### 为本地 HTTP 服务启用 HTTPS
通过 `https2http` 插件可以让本地 HTTP 服务转换成 HTTPS 服务对外提供。
1. 启用 frpc启用 `https2http` 插件,配置如下:
```ini
# frpc.ini
[common]
server_addr = x.x.x.x
server_port = 7000
[test_htts2http]
type = https
custom_domains = test.yourdomain.com
plugin = https2http
plugin_local_addr = 127.0.0.1:80
# HTTPS 证书相关的配置
plugin_crt_path = ./server.crt
plugin_key_path = ./server.key
plugin_host_header_rewrite = 127.0.0.1
```
2. 通过浏览器访问 `https://test.yourdomain.com` 即可。
### 安全地暴露内网服务 ### 安全地暴露内网服务
对于某些服务来说如果直接暴露于公网上将会存在安全隐患。 对于某些服务来说如果直接暴露于公网上将会存在安全隐患。
@@ -404,6 +437,24 @@ dashboard_pwd = admin
![dashboard](/doc/pic/dashboard.png) ![dashboard](/doc/pic/dashboard.png)
### Admin UI
Admin UI 可以帮助用户通过浏览器来查询和管理客户端的 proxy 状态和配置。
需要在 frpc.ini 中指定 admin 服务使用的端口,即可开启此功能:
```ini
[common]
admin_addr = 127.0.0.1
admin_port = 7400
admin_user = admin
admin_pwd = admin
```
打开浏览器通过 `http://127.0.0.1:7400` 访问 Admin UI用户名密码默认为 `admin`。
如果想要在外网环境访问 Admin UI将 7400 端口映射出去即可,但需要重视安全风险。
### 身份验证 ### 身份验证
服务端和客户端的 common 配置中的 `token` 参数一致则身份验证通过。 服务端和客户端的 common 配置中的 `token` 参数一致则身份验证通过。
@@ -426,6 +477,14 @@ use_compression = true
如果传输的报文长度较长,通过设置 `use_compression = true` 对传输内容进行压缩,可以有效减小 frpc 与 frps 之间的网络流量,加快流量转发速度,但是会额外消耗一些 cpu 资源。 如果传输的报文长度较长,通过设置 `use_compression = true` 对传输内容进行压缩,可以有效减小 frpc 与 frps 之间的网络流量,加快流量转发速度,但是会额外消耗一些 cpu 资源。
#### TLS
从 v0.25.0 版本开始 frpc 和 frps 之间支持通过 TLS 协议加密传输。通过在 `frpc.ini` 的 `common` 中配置 `tls_enable = true` 来启用此功能,安全性更高。
为了端口复用frp 建立 TLS 连接的第一个字节为 0x17。
**注意: 启用此功能后除 xtcp 外,不需要再设置 use_encryption。**
### 客户端热加载配置文件 ### 客户端热加载配置文件
当修改了 frpc 中的代理配置,可以通过 `frpc reload` 命令来动态加载配置文件,通常会在 10 秒内完成代理的更新。 当修改了 frpc 中的代理配置,可以通过 `frpc reload` 命令来动态加载配置文件,通常会在 10 秒内完成代理的更新。
@@ -485,7 +544,7 @@ tcp_mux = false
### 底层通信可选 kcp 协议 ### 底层通信可选 kcp 协议
从 v0.12.0 版本开始,底层通信协议支持选择 kcp 协议,在弱网环境下传输效率提升明显,但是会有一些额外的流量消耗。 底层通信协议支持选择 kcp 协议,在弱网环境下传输效率提升明显,但是会有一些额外的流量消耗。
开启 kcp 协议支持: 开启 kcp 协议支持:
@@ -537,7 +596,8 @@ tcp_mux = false
### 负载均衡 ### 负载均衡
可以将多个相同类型的 proxy 加入到同一个 group 中,从而实现负载均衡的功能。 可以将多个相同类型的 proxy 加入到同一个 group 中,从而实现负载均衡的功能。
目前只支持 tcp 类型的 proxy。
目前只支持 TCP 和 HTTP 类型的 proxy。
```ini ```ini
# frpc.ini # frpc.ini
@@ -558,7 +618,9 @@ group_key = 123
用户连接 frps 服务器的 80 端口frps 会将接收到的用户连接随机分发给其中一个存活的 proxy。这样可以在一台 frpc 机器挂掉后仍然有其他节点能够提供服务。 用户连接 frps 服务器的 80 端口frps 会将接收到的用户连接随机分发给其中一个存活的 proxy。这样可以在一台 frpc 机器挂掉后仍然有其他节点能够提供服务。
要求 `group_key` 相同,做权限验证,且 `remote_port` 相同。 TCP 类型代理要求 `group_key` 相同,做权限验证,且 `remote_port` 相同。
HTTP 类型代理要求 `group_key, custom_domains 或 subdomain 和 locations` 相同。
### 健康检查 ### 健康检查
@@ -639,7 +701,34 @@ header_X-From-Where = frp
### 获取用户真实 IP ### 获取用户真实 IP
目前只有 **http** 类型的代理支持这一功能,可以通过用户请求的 header 中的 `X-Forwarded-For` 和 `X-Real-IP` 来获取用户真实 IP。 #### HTTP X-Forwarded-For
目前只有 **http** 类型的代理支持这一功能,可以通过用户请求的 header 中的 `X-Forwarded-For` 来获取用户真实 IP默认启用。
#### Proxy Protocol
frp 支持通过 **Proxy Protocol** 协议来传递经过 frp 代理的请求的真实 IP此功能支持所有以 TCP 为底层协议的类型,不支持 UDP。
**Proxy Protocol** 功能启用后frpc 在和本地服务建立连接后,会先发送一段 **Proxy Protocol** 的协议内容给本地服务,本地服务通过解析这一内容可以获得访问用户的真实 IP。所以不仅仅是 HTTP 服务,任何的 TCP 服务,只要支持这一协议,都可以获得用户的真实 IP 地址。
需要注意的是,在代理配置中如果要启用此功能,需要本地的服务能够支持 **Proxy Protocol** 这一协议,目前 nginx 和 haproxy 都能够很好的支持。
这里以 https 类型为例:
```ini
# frpc.ini
[web]
type = https
local_port = 443
custom_domains = test.yourdomain.com
# 目前支持 v1 和 v2 两个版本的 proxy protocol 协议。
proxy_protocol_version = v2
```
只需要在代理配置中增加一行 `proxy_protocol_version = v2` 即可开启此功能。
本地的 https 服务可以通过在 nginx 的配置中启用 **Proxy Protocol** 的解析并将结果设置在 `X-Real-IP` 这个 Header 中就可以在自己的 Web 服务中通过 `X-Real-IP` 获取到用户的真实 IP。
### 通过密码保护你的 web 服务 ### 通过密码保护你的 web 服务

View File

@@ -311,6 +311,8 @@ func (svr *Service) apiPutConfig(w http.ResponseWriter, r *http.Request) {
newRows = append(newRows, token) newRows = append(newRows, token)
} }
} }
} else {
newRows = tmpRows
} }
content = strings.Join(newRows, "\n") content = strings.Join(newRows, "\n")

View File

@@ -15,6 +15,7 @@
package client package client
import ( import (
"crypto/tls"
"fmt" "fmt"
"io" "io"
"runtime/debug" "runtime/debug"
@@ -130,7 +131,7 @@ func (ctl *Control) HandleReqWorkConn(inMsg *msg.ReqWorkConn) {
workConn.AddLogPrefix(startMsg.ProxyName) workConn.AddLogPrefix(startMsg.ProxyName)
// dispatch this work connection to related proxy // dispatch this work connection to related proxy
ctl.pm.HandleWorkConn(startMsg.ProxyName, workConn) ctl.pm.HandleWorkConn(startMsg.ProxyName, workConn, &startMsg)
} }
func (ctl *Control) HandleNewProxyResp(inMsg *msg.NewProxyResp) { func (ctl *Control) HandleNewProxyResp(inMsg *msg.NewProxyResp) {
@@ -147,6 +148,9 @@ func (ctl *Control) HandleNewProxyResp(inMsg *msg.NewProxyResp) {
func (ctl *Control) Close() error { func (ctl *Control) Close() error {
ctl.pm.Close() ctl.pm.Close()
ctl.conn.Close() ctl.conn.Close()
if ctl.session != nil {
ctl.session.Close()
}
return nil return nil
} }
@@ -166,8 +170,14 @@ func (ctl *Control) connectServer() (conn frpNet.Conn, err error) {
} }
conn = frpNet.WrapConn(stream) conn = frpNet.WrapConn(stream)
} else { } else {
conn, err = frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol, var tlsConfig *tls.Config
fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort)) if g.GlbClientCfg.TLSEnable {
tlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
conn, err = frpNet.ConnectServerByProxyWithTLS(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort), tlsConfig)
if err != nil { if err != nil {
ctl.Warn("start new connection to server error: %v", err) ctl.Warn("start new connection to server error: %v", err)
return return
@@ -195,6 +205,7 @@ func (ctl *Control) reader() {
return return
} else { } else {
ctl.Warn("read error: %v", err) ctl.Warn("read error: %v", err)
ctl.conn.Close()
return return
} }
} else { } else {
@@ -293,6 +304,9 @@ func (ctl *Control) worker() {
ctl.vm.Close() ctl.vm.Close()
close(ctl.closedDoneCh) close(ctl.closedDoneCh)
if ctl.session != nil {
ctl.session.Close()
}
return return
} }
} }

View File

@@ -18,6 +18,8 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io"
"io/ioutil"
"net" "net"
"net/http" "net/http"
"time" "time"
@@ -94,12 +96,12 @@ func (monitor *HealthCheckMonitor) Stop() {
func (monitor *HealthCheckMonitor) checkWorker() { func (monitor *HealthCheckMonitor) checkWorker() {
for { for {
ctx, cancel := context.WithDeadline(monitor.ctx, time.Now().Add(monitor.timeout)) doCtx, cancel := context.WithDeadline(monitor.ctx, time.Now().Add(monitor.timeout))
err := monitor.doCheck(ctx) err := monitor.doCheck(doCtx)
// check if this monitor has been closed // check if this monitor has been closed
select { select {
case <-ctx.Done(): case <-monitor.ctx.Done():
cancel() cancel()
return return
default: default:
@@ -170,6 +172,8 @@ func (monitor *HealthCheckMonitor) doHttpCheck(ctx context.Context) error {
if err != nil { if err != nil {
return err return err
} }
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
if resp.StatusCode/100 != 2 { if resp.StatusCode/100 != 2 {
return fmt.Errorf("do http health check, StatusCode is [%d] not 2xx", resp.StatusCode) return fmt.Errorf("do http health check, StatusCode is [%d] not 2xx", resp.StatusCode)

View File

@@ -18,7 +18,10 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net" "net"
"strconv"
"strings"
"sync" "sync"
"time" "time"
@@ -33,6 +36,8 @@ import (
"github.com/fatedier/golib/errors" "github.com/fatedier/golib/errors"
frpIo "github.com/fatedier/golib/io" frpIo "github.com/fatedier/golib/io"
"github.com/fatedier/golib/pool" "github.com/fatedier/golib/pool"
fmux "github.com/hashicorp/yamux"
pp "github.com/pires/go-proxyproto"
) )
// Proxy defines how to handle work connections for different proxy type. // Proxy defines how to handle work connections for different proxy type.
@@ -40,7 +45,7 @@ type Proxy interface {
Run() error Run() error
// InWorkConn accept work connections registered to server. // InWorkConn accept work connections registered to server.
InWorkConn(conn frpNet.Conn) InWorkConn(frpNet.Conn, *msg.StartWorkConn)
Close() Close()
log.Logger log.Logger
@@ -115,9 +120,9 @@ func (pxy *TcpProxy) Close() {
} }
} }
func (pxy *TcpProxy) InWorkConn(conn frpNet.Conn) { func (pxy *TcpProxy) InWorkConn(conn frpNet.Conn, m *msg.StartWorkConn) {
HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn, HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn,
[]byte(g.GlbClientCfg.Token)) []byte(g.GlbClientCfg.Token), m)
} }
// HTTP // HTTP
@@ -144,9 +149,9 @@ func (pxy *HttpProxy) Close() {
} }
} }
func (pxy *HttpProxy) InWorkConn(conn frpNet.Conn) { func (pxy *HttpProxy) InWorkConn(conn frpNet.Conn, m *msg.StartWorkConn) {
HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn, HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn,
[]byte(g.GlbClientCfg.Token)) []byte(g.GlbClientCfg.Token), m)
} }
// HTTPS // HTTPS
@@ -173,9 +178,9 @@ func (pxy *HttpsProxy) Close() {
} }
} }
func (pxy *HttpsProxy) InWorkConn(conn frpNet.Conn) { func (pxy *HttpsProxy) InWorkConn(conn frpNet.Conn, m *msg.StartWorkConn) {
HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn, HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn,
[]byte(g.GlbClientCfg.Token)) []byte(g.GlbClientCfg.Token), m)
} }
// STCP // STCP
@@ -202,9 +207,9 @@ func (pxy *StcpProxy) Close() {
} }
} }
func (pxy *StcpProxy) InWorkConn(conn frpNet.Conn) { func (pxy *StcpProxy) InWorkConn(conn frpNet.Conn, m *msg.StartWorkConn) {
HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn, HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, conn,
[]byte(g.GlbClientCfg.Token)) []byte(g.GlbClientCfg.Token), m)
} }
// XTCP // XTCP
@@ -231,7 +236,7 @@ func (pxy *XtcpProxy) Close() {
} }
} }
func (pxy *XtcpProxy) InWorkConn(conn frpNet.Conn) { func (pxy *XtcpProxy) InWorkConn(conn frpNet.Conn, m *msg.StartWorkConn) {
defer conn.Close() defer conn.Close()
var natHoleSidMsg msg.NatHoleSid var natHoleSidMsg msg.NatHoleSid
err := msg.ReadMsgInto(conn, &natHoleSidMsg) err := msg.ReadMsgInto(conn, &natHoleSidMsg)
@@ -278,32 +283,97 @@ func (pxy *XtcpProxy) InWorkConn(conn frpNet.Conn) {
return return
} }
pxy.Trace("get natHoleRespMsg, sid [%s], client address [%s]", natHoleRespMsg.Sid, natHoleRespMsg.ClientAddr) pxy.Trace("get natHoleRespMsg, sid [%s], client address [%s] visitor address [%s]", natHoleRespMsg.Sid, natHoleRespMsg.ClientAddr, natHoleRespMsg.VisitorAddr)
// Send sid to visitor udp address. // Send detect message
time.Sleep(time.Second) array := strings.Split(natHoleRespMsg.VisitorAddr, ":")
if len(array) <= 1 {
pxy.Error("get NatHoleResp visitor address error: %v", natHoleRespMsg.VisitorAddr)
}
laddr, _ := net.ResolveUDPAddr("udp", clientConn.LocalAddr().String()) laddr, _ := net.ResolveUDPAddr("udp", clientConn.LocalAddr().String())
daddr, err := net.ResolveUDPAddr("udp", natHoleRespMsg.VisitorAddr) /*
for i := 1000; i < 65000; i++ {
pxy.sendDetectMsg(array[0], int64(i), laddr, "a")
}
*/
port, err := strconv.ParseInt(array[1], 10, 64)
if err != nil { if err != nil {
pxy.Error("resolve visitor udp address error: %v", err) pxy.Error("get natHoleResp visitor address error: %v", natHoleRespMsg.VisitorAddr)
return return
} }
pxy.sendDetectMsg(array[0], int(port), laddr, []byte(natHoleRespMsg.Sid))
pxy.Trace("send all detect msg done")
lConn, err := net.DialUDP("udp", laddr, daddr) msg.WriteMsg(conn, &msg.NatHoleClientDetectOK{})
// Listen for clientConn's address and wait for visitor connection
lConn, err := net.ListenUDP("udp", laddr)
if err != nil { if err != nil {
pxy.Error("dial visitor udp address error: %v", err) pxy.Error("listen on visitorConn's local adress error: %v", err)
return return
} }
lConn.Write([]byte(natHoleRespMsg.Sid)) defer lConn.Close()
kcpConn, err := frpNet.NewKcpConnFromUdp(lConn, true, natHoleRespMsg.VisitorAddr) lConn.SetReadDeadline(time.Now().Add(8 * time.Second))
sidBuf := pool.GetBuf(1024)
var uAddr *net.UDPAddr
n, uAddr, err = lConn.ReadFromUDP(sidBuf)
if err != nil {
pxy.Warn("get sid from visitor error: %v", err)
return
}
lConn.SetReadDeadline(time.Time{})
if string(sidBuf[:n]) != natHoleRespMsg.Sid {
pxy.Warn("incorrect sid from visitor")
return
}
pool.PutBuf(sidBuf)
pxy.Info("nat hole connection make success, sid [%s]", natHoleRespMsg.Sid)
lConn.WriteToUDP(sidBuf[:n], uAddr)
kcpConn, err := frpNet.NewKcpConnFromUdp(lConn, false, natHoleRespMsg.VisitorAddr)
if err != nil { if err != nil {
pxy.Error("create kcp connection from udp connection error: %v", err) pxy.Error("create kcp connection from udp connection error: %v", err)
return return
} }
fmuxCfg := fmux.DefaultConfig()
fmuxCfg.KeepAliveInterval = 5 * time.Second
fmuxCfg.LogOutput = ioutil.Discard
sess, err := fmux.Server(kcpConn, fmuxCfg)
if err != nil {
pxy.Error("create yamux server from kcp connection error: %v", err)
return
}
defer sess.Close()
muxConn, err := sess.Accept()
if err != nil {
pxy.Error("accept for yamux connection error: %v", err)
return
}
HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf, HandleTcpWorkConnection(&pxy.cfg.LocalSvrConf, pxy.proxyPlugin, &pxy.cfg.BaseProxyConf,
frpNet.WrapConn(kcpConn), []byte(pxy.cfg.Sk)) frpNet.WrapConn(muxConn), []byte(pxy.cfg.Sk), m)
}
func (pxy *XtcpProxy) sendDetectMsg(addr string, port int, laddr *net.UDPAddr, content []byte) (err error) {
daddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", addr, port))
if err != nil {
return err
}
tConn, err := net.DialUDP("udp", laddr, daddr)
if err != nil {
return err
}
//uConn := ipv4.NewConn(tConn)
//uConn.SetTTL(3)
tConn.Write(content)
tConn.Close()
return nil
} }
// UDP // UDP
@@ -346,7 +416,7 @@ func (pxy *UdpProxy) Close() {
} }
} }
func (pxy *UdpProxy) InWorkConn(conn frpNet.Conn) { func (pxy *UdpProxy) InWorkConn(conn frpNet.Conn, m *msg.StartWorkConn) {
pxy.Info("incoming a new work connection for udp proxy, %s", conn.RemoteAddr().String()) pxy.Info("incoming a new work connection for udp proxy, %s", conn.RemoteAddr().String())
// close resources releated with old workConn // close resources releated with old workConn
pxy.Close() pxy.Close()
@@ -413,7 +483,7 @@ func (pxy *UdpProxy) InWorkConn(conn frpNet.Conn) {
// Common handler for tcp work connections. // Common handler for tcp work connections.
func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.Plugin, func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.Plugin,
baseInfo *config.BaseProxyConf, workConn frpNet.Conn, encKey []byte) { baseInfo *config.BaseProxyConf, workConn frpNet.Conn, encKey []byte, m *msg.StartWorkConn) {
var ( var (
remote io.ReadWriteCloser remote io.ReadWriteCloser
@@ -433,10 +503,43 @@ func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.
remote = frpIo.WithCompression(remote) remote = frpIo.WithCompression(remote)
} }
// check if we need to send proxy protocol info
var extraInfo []byte
if baseInfo.ProxyProtocolVersion != "" {
if m.SrcAddr != "" && m.SrcPort != 0 {
if m.DstAddr == "" {
m.DstAddr = "127.0.0.1"
}
h := &pp.Header{
Command: pp.PROXY,
SourceAddress: net.ParseIP(m.SrcAddr),
SourcePort: m.SrcPort,
DestinationAddress: net.ParseIP(m.DstAddr),
DestinationPort: m.DstPort,
}
if h.SourceAddress.To16() == nil {
h.TransportProtocol = pp.TCPv4
} else {
h.TransportProtocol = pp.TCPv6
}
if baseInfo.ProxyProtocolVersion == "v1" {
h.Version = 1
} else if baseInfo.ProxyProtocolVersion == "v2" {
h.Version = 2
}
buf := bytes.NewBuffer(nil)
h.WriteTo(buf)
extraInfo = buf.Bytes()
}
}
if proxyPlugin != nil { if proxyPlugin != nil {
// if plugin is set, let plugin handle connections first // if plugin is set, let plugin handle connections first
workConn.Debug("handle by plugin: %s", proxyPlugin.Name()) workConn.Debug("handle by plugin: %s", proxyPlugin.Name())
proxyPlugin.Handle(remote, workConn) proxyPlugin.Handle(remote, workConn, extraInfo)
workConn.Debug("handle by plugin finished") workConn.Debug("handle by plugin finished")
return return
} else { } else {
@@ -449,6 +552,11 @@ func HandleTcpWorkConnection(localInfo *config.LocalSvrConf, proxyPlugin plugin.
workConn.Debug("join connections, localConn(l[%s] r[%s]) workConn(l[%s] r[%s])", localConn.LocalAddr().String(), workConn.Debug("join connections, localConn(l[%s] r[%s]) workConn(l[%s] r[%s])", localConn.LocalAddr().String(),
localConn.RemoteAddr().String(), workConn.LocalAddr().String(), workConn.RemoteAddr().String()) localConn.RemoteAddr().String(), workConn.LocalAddr().String(), workConn.RemoteAddr().String())
if len(extraInfo) > 0 {
localConn.Write(extraInfo)
}
frpIo.Join(localConn, remote) frpIo.Join(localConn, remote)
workConn.Debug("join connections closed") workConn.Debug("join connections closed")
} }

View File

@@ -58,12 +58,12 @@ func (pm *ProxyManager) Close() {
pm.proxies = make(map[string]*ProxyWrapper) pm.proxies = make(map[string]*ProxyWrapper)
} }
func (pm *ProxyManager) HandleWorkConn(name string, workConn frpNet.Conn) { func (pm *ProxyManager) HandleWorkConn(name string, workConn frpNet.Conn, m *msg.StartWorkConn) {
pm.mu.RLock() pm.mu.RLock()
pw, ok := pm.proxies[name] pw, ok := pm.proxies[name]
pm.mu.RUnlock() pm.mu.RUnlock()
if ok { if ok {
pw.InWorkConn(workConn) pw.InWorkConn(workConn, m)
} else { } else {
workConn.Close() workConn.Close()
} }

View File

@@ -217,13 +217,13 @@ func (pw *ProxyWrapper) statusFailedCallback() {
pw.Info("health check failed") pw.Info("health check failed")
} }
func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn) { func (pw *ProxyWrapper) InWorkConn(workConn frpNet.Conn, m *msg.StartWorkConn) {
pw.mu.RLock() pw.mu.RLock()
pxy := pw.pxy pxy := pw.pxy
pw.mu.RUnlock() pw.mu.RUnlock()
if pxy != nil { if pxy != nil {
workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String()) workConn.Debug("start a new work connection, localAddr: %s remoteAddr: %s", workConn.LocalAddr().String(), workConn.RemoteAddr().String())
go pxy.InWorkConn(workConn) go pxy.InWorkConn(workConn, m)
} else { } else {
workConn.Close() workConn.Close()
} }

View File

@@ -15,6 +15,7 @@
package client package client
import ( import (
"crypto/tls"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"runtime" "runtime"
@@ -151,8 +152,14 @@ func (svr *Service) keepControllerWorking() {
// conn: control connection // conn: control connection
// session: if it's not nil, using tcp mux // session: if it's not nil, using tcp mux
func (svr *Service) login() (conn frpNet.Conn, session *fmux.Session, err error) { func (svr *Service) login() (conn frpNet.Conn, session *fmux.Session, err error) {
conn, err = frpNet.ConnectServerByProxy(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol, var tlsConfig *tls.Config
fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort)) if g.GlbClientCfg.TLSEnable {
tlsConfig = &tls.Config{
InsecureSkipVerify: true,
}
}
conn, err = frpNet.ConnectServerByProxyWithTLS(g.GlbClientCfg.HttpProxy, g.GlbClientCfg.Protocol,
fmt.Sprintf("%s:%d", g.GlbClientCfg.ServerAddr, g.GlbClientCfg.ServerPort), tlsConfig)
if err != nil { if err != nil {
return return
} }
@@ -160,6 +167,9 @@ func (svr *Service) login() (conn frpNet.Conn, session *fmux.Session, err error)
defer func() { defer func() {
if err != nil { if err != nil {
conn.Close() conn.Close()
if session != nil {
session.Close()
}
} }
}() }()

View File

@@ -18,14 +18,11 @@ import (
"bytes" "bytes"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net" "net"
"strconv"
"strings"
"sync" "sync"
"time" "time"
"golang.org/x/net/ipv4"
"github.com/fatedier/frp/g" "github.com/fatedier/frp/g"
"github.com/fatedier/frp/models/config" "github.com/fatedier/frp/models/config"
"github.com/fatedier/frp/models/msg" "github.com/fatedier/frp/models/msg"
@@ -35,6 +32,7 @@ import (
frpIo "github.com/fatedier/golib/io" frpIo "github.com/fatedier/golib/io"
"github.com/fatedier/golib/pool" "github.com/fatedier/golib/pool"
fmux "github.com/hashicorp/yamux"
) )
// Visitor is used for forward traffics from local port tot remote service. // Visitor is used for forward traffics from local port tot remote service.
@@ -249,40 +247,31 @@ func (sv *XtcpVisitor) handleConn(userConn frpNet.Conn) {
return return
} }
sv.Trace("get natHoleRespMsg, sid [%s], client address [%s]", natHoleRespMsg.Sid, natHoleRespMsg.ClientAddr) sv.Trace("get natHoleRespMsg, sid [%s], client address [%s], visitor address [%s]", natHoleRespMsg.Sid, natHoleRespMsg.ClientAddr, natHoleRespMsg.VisitorAddr)
// Close visitorConn, so we can use it's local address. // Close visitorConn, so we can use it's local address.
visitorConn.Close() visitorConn.Close()
// Send detect message. // send sid message to client
array := strings.Split(natHoleRespMsg.ClientAddr, ":")
if len(array) <= 1 {
sv.Error("get natHoleResp client address error: %s", natHoleRespMsg.ClientAddr)
return
}
laddr, _ := net.ResolveUDPAddr("udp", visitorConn.LocalAddr().String()) laddr, _ := net.ResolveUDPAddr("udp", visitorConn.LocalAddr().String())
/* daddr, err := net.ResolveUDPAddr("udp", natHoleRespMsg.ClientAddr)
for i := 1000; i < 65000; i++ {
sv.sendDetectMsg(array[0], int64(i), laddr, "a")
}
*/
port, err := strconv.ParseInt(array[1], 10, 64)
if err != nil { if err != nil {
sv.Error("get natHoleResp client address error: %s", natHoleRespMsg.ClientAddr) sv.Error("resolve client udp address error: %v", err)
return return
} }
sv.sendDetectMsg(array[0], int(port), laddr, []byte(natHoleRespMsg.Sid)) lConn, err := net.DialUDP("udp", laddr, daddr)
sv.Trace("send all detect msg done") if err != nil {
sv.Error("dial client udp address error: %v", err)
return
}
defer lConn.Close()
// Listen for visitorConn's address and wait for client connection. lConn.Write([]byte(natHoleRespMsg.Sid))
lConn, err := net.ListenUDP("udp", laddr)
if err != nil { // read ack sid from client
sv.Error("listen on visitorConn's local adress error: %v", err)
return
}
lConn.SetReadDeadline(time.Now().Add(5 * time.Second))
sidBuf := pool.GetBuf(1024) sidBuf := pool.GetBuf(1024)
n, _, err = lConn.ReadFromUDP(sidBuf) lConn.SetReadDeadline(time.Now().Add(8 * time.Second))
n, err = lConn.Read(sidBuf)
if err != nil { if err != nil {
sv.Warn("get sid from client error: %v", err) sv.Warn("get sid from client error: %v", err)
return return
@@ -292,11 +281,13 @@ func (sv *XtcpVisitor) handleConn(userConn frpNet.Conn) {
sv.Warn("incorrect sid from client") sv.Warn("incorrect sid from client")
return return
} }
sv.Info("nat hole connection make success, sid [%s]", string(sidBuf[:n]))
pool.PutBuf(sidBuf) pool.PutBuf(sidBuf)
sv.Info("nat hole connection make success, sid [%s]", natHoleRespMsg.Sid)
// wrap kcp connection
var remote io.ReadWriteCloser var remote io.ReadWriteCloser
remote, err = frpNet.NewKcpConnFromUdp(lConn, false, natHoleRespMsg.ClientAddr) remote, err = frpNet.NewKcpConnFromUdp(lConn, true, natHoleRespMsg.ClientAddr)
if err != nil { if err != nil {
sv.Error("create kcp connection from udp connection error: %v", err) sv.Error("create kcp connection from udp connection error: %v", err)
return return
@@ -314,25 +305,21 @@ func (sv *XtcpVisitor) handleConn(userConn frpNet.Conn) {
remote = frpIo.WithCompression(remote) remote = frpIo.WithCompression(remote)
} }
frpIo.Join(userConn, remote) fmuxCfg := fmux.DefaultConfig()
fmuxCfg.KeepAliveInterval = 5 * time.Second
fmuxCfg.LogOutput = ioutil.Discard
sess, err := fmux.Client(remote, fmuxCfg)
if err != nil {
sv.Error("create yamux session error: %v", err)
return
}
defer sess.Close()
muxConn, err := sess.Open()
if err != nil {
sv.Error("open yamux stream error: %v", err)
return
}
frpIo.Join(userConn, muxConn)
sv.Debug("join connections closed") sv.Debug("join connections closed")
} }
func (sv *XtcpVisitor) sendDetectMsg(addr string, port int, laddr *net.UDPAddr, content []byte) (err error) {
daddr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", addr, port))
if err != nil {
return err
}
tConn, err := net.DialUDP("udp", laddr, daddr)
if err != nil {
return err
}
uConn := ipv4.NewConn(tConn)
uConn.SetTTL(3)
tConn.Write(content)
tConn.Close()
return nil
}

View File

@@ -76,17 +76,16 @@ func reload() error {
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return err return err
} else {
if resp.StatusCode == 200 {
return nil
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return fmt.Errorf("code [%d], %s", resp.StatusCode, strings.TrimSpace(string(body)))
} }
return nil defer resp.Body.Close()
if resp.StatusCode == 200 {
return nil
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return fmt.Errorf("code [%d], %s", resp.StatusCode, strings.TrimSpace(string(body)))
} }

View File

@@ -73,7 +73,7 @@ var (
) )
func init() { func init() {
rootCmd.PersistentFlags().StringVarP(&cfgFile, "", "c", "./frpc.ini", "config file of frpc") rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "./frpc.ini", "config file of frpc")
rootCmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "version of frpc") rootCmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "version of frpc")
kcpDoneCh = make(chan struct{}) kcpDoneCh = make(chan struct{})

View File

@@ -78,76 +78,78 @@ func status() error {
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return err return err
} else {
if resp.StatusCode != 200 {
return fmt.Errorf("admin api status code [%d]", resp.StatusCode)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
res := &client.StatusResp{}
err = json.Unmarshal(body, &res)
if err != nil {
return fmt.Errorf("unmarshal http response error: %s", strings.TrimSpace(string(body)))
}
fmt.Println("Proxy Status...")
if len(res.Tcp) > 0 {
fmt.Printf("TCP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Tcp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Udp) > 0 {
fmt.Printf("UDP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Udp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Http) > 0 {
fmt.Printf("HTTP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Http {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Https) > 0 {
fmt.Printf("HTTPS")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Https {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Stcp) > 0 {
fmt.Printf("STCP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Stcp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Xtcp) > 0 {
fmt.Printf("XTCP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Xtcp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
} }
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("admin api status code [%d]", resp.StatusCode)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
res := &client.StatusResp{}
err = json.Unmarshal(body, &res)
if err != nil {
return fmt.Errorf("unmarshal http response error: %s", strings.TrimSpace(string(body)))
}
fmt.Println("Proxy Status...")
if len(res.Tcp) > 0 {
fmt.Printf("TCP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Tcp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Udp) > 0 {
fmt.Printf("UDP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Udp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Http) > 0 {
fmt.Printf("HTTP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Http {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Https) > 0 {
fmt.Printf("HTTPS")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Https {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Stcp) > 0 {
fmt.Printf("STCP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Stcp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
if len(res.Xtcp) > 0 {
fmt.Printf("XTCP")
tbl := table.New("Name", "Status", "LocalAddr", "Plugin", "RemoteAddr", "Error")
for _, ps := range res.Xtcp {
tbl.AddRow(ps.Name, ps.Status, ps.LocalAddr, ps.Plugin, ps.RemoteAddr, ps.Err)
}
tbl.Print()
fmt.Println("")
}
return nil return nil
} }

View File

@@ -68,7 +68,7 @@ var xtcpCmd = &cobra.Command{
if role == "server" { if role == "server" {
cfg := &config.XtcpProxyConf{} cfg := &config.XtcpProxyConf{}
cfg.ProxyName = prefix + proxyName cfg.ProxyName = prefix + proxyName
cfg.ProxyType = consts.StcpProxy cfg.ProxyType = consts.XtcpProxy
cfg.UseEncryption = useEncryption cfg.UseEncryption = useEncryption
cfg.UseCompression = useCompression cfg.UseCompression = useCompression
cfg.Role = role cfg.Role = role
@@ -84,7 +84,7 @@ var xtcpCmd = &cobra.Command{
} else if role == "visitor" { } else if role == "visitor" {
cfg := &config.XtcpVisitorConf{} cfg := &config.XtcpVisitorConf{}
cfg.ProxyName = prefix + proxyName cfg.ProxyName = prefix + proxyName
cfg.ProxyType = consts.StcpProxy cfg.ProxyType = consts.XtcpProxy
cfg.UseEncryption = useEncryption cfg.UseEncryption = useEncryption
cfg.UseCompression = useCompression cfg.UseCompression = useCompression
cfg.Role = role cfg.Role = role

View File

@@ -62,7 +62,7 @@ var (
) )
func init() { func init() {
rootCmd.PersistentFlags().StringVarP(&cfgFile, "", "c", "", "config file of frps") rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file of frps")
rootCmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "version of frpc") rootCmd.PersistentFlags().BoolVarP(&showVersion, "version", "v", false, "version of frpc")
rootCmd.PersistentFlags().StringVarP(&bindAddr, "bind_addr", "", "0.0.0.0", "bind address") rootCmd.PersistentFlags().StringVarP(&bindAddr, "bind_addr", "", "0.0.0.0", "bind address")
@@ -79,7 +79,7 @@ func init() {
rootCmd.PersistentFlags().StringVarP(&dashboardPwd, "dashboard_pwd", "", "admin", "dashboard password") rootCmd.PersistentFlags().StringVarP(&dashboardPwd, "dashboard_pwd", "", "admin", "dashboard password")
rootCmd.PersistentFlags().StringVarP(&logFile, "log_file", "", "console", "log file") rootCmd.PersistentFlags().StringVarP(&logFile, "log_file", "", "console", "log file")
rootCmd.PersistentFlags().StringVarP(&logLevel, "log_level", "", "info", "log level") rootCmd.PersistentFlags().StringVarP(&logLevel, "log_level", "", "info", "log level")
rootCmd.PersistentFlags().Int64VarP(&logMaxDays, "log_max_days", "", 3, "log_max_days") rootCmd.PersistentFlags().Int64VarP(&logMaxDays, "log_max_days", "", 3, "log max days")
rootCmd.PersistentFlags().StringVarP(&token, "token", "t", "", "auth token") rootCmd.PersistentFlags().StringVarP(&token, "token", "t", "", "auth token")
rootCmd.PersistentFlags().StringVarP(&subDomainHost, "subdomain_host", "", "", "subdomain host") rootCmd.PersistentFlags().StringVarP(&subDomainHost, "subdomain_host", "", "", "subdomain host")
rootCmd.PersistentFlags().StringVarP(&allowPorts, "allow_ports", "", "", "allow ports") rootCmd.PersistentFlags().StringVarP(&allowPorts, "allow_ports", "", "", "allow ports")
@@ -187,9 +187,9 @@ func parseServerCommonCfgFromCmd() (err error) {
g.GlbServerCfg.MaxPortsPerClient = maxPortsPerClient g.GlbServerCfg.MaxPortsPerClient = maxPortsPerClient
if logFile == "console" { if logFile == "console" {
g.GlbClientCfg.LogWay = "console" g.GlbServerCfg.LogWay = "console"
} else { } else {
g.GlbClientCfg.LogWay = "file" g.GlbServerCfg.LogWay = "file"
} }
return return
} }

View File

@@ -44,10 +44,13 @@ login_fail_exit = true
# now it supports tcp and kcp and websocket, default is tcp # now it supports tcp and kcp and websocket, default is tcp
protocol = tcp protocol = tcp
# if tls_enable is true, frpc will connect frps by tls
tls_enable = true
# specify a dns server, so frpc will use this instead of default one # specify a dns server, so frpc will use this instead of default one
# dns_server = 8.8.8.8 # dns_server = 8.8.8.8
# proxy names you want to start divided by ',' # proxy names you want to start seperated by ','
# default is empty, means all proxies # default is empty, means all proxies
# start = ssh,dns # start = ssh,dns
@@ -151,6 +154,9 @@ use_encryption = false
use_compression = false use_compression = false
subdomain = web01 subdomain = web01
custom_domains = web02.yourdomain.com custom_domains = web02.yourdomain.com
# if not empty, frpc will use proxy protocol to transfer connection info to your local service
# v1 or v2 or empty
proxy_protocol_version = v2
[plugin_unix_domain_socket] [plugin_unix_domain_socket]
type = tcp type = tcp
@@ -184,6 +190,15 @@ plugin_strip_prefix = static
plugin_http_user = abc plugin_http_user = abc
plugin_http_passwd = abc plugin_http_passwd = abc
[plugin_https2http]
type = https
custom_domains = test.yourdomain.com
plugin = https2http
plugin_local_addr = 127.0.0.1:80
plugin_crt_path = ./server.crt
plugin_key_path = ./server.key
plugin_host_header_rewrite = 127.0.0.1
[secret_tcp] [secret_tcp]
# If the type is secret tcp, remote_port is useless # If the type is secret tcp, remote_port is useless
# Who want to connect local port should deploy another frpc with stcp proxy and role is visitor # Who want to connect local port should deploy another frpc with stcp proxy and role is visitor

View File

@@ -65,3 +65,6 @@ subdomain_host = frps.com
# if tcp stream multiplexing is used, default is true # if tcp stream multiplexing is used, default is true
tcp_mux = true tcp_mux = true
# custom 404 page for HTTP requests
# custom_404_page = /path/to/404.html

14
conf/systemd/frpc.service Normal file
View File

@@ -0,0 +1,14 @@
[Unit]
Description=Frp Client Service
After=network.target
[Service]
Type=simple
User=nobody
Restart=on-failure
RestartSec=5s
ExecStart=/usr/bin/frpc -c /etc/frp/frpc.ini
ExecReload=/usr/bin/frpc reload -c /etc/frp/frpc.ini
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,14 @@
[Unit]
Description=Frp Client Service
After=network.target
[Service]
Type=idle
User=nobody
Restart=on-failure
RestartSec=5s
ExecStart=/usr/bin/frpc -c /etc/frp/%i.ini
ExecReload=/usr/bin/frpc reload -c /etc/frp/%i.ini
[Install]
WantedBy=multi-user.target

13
conf/systemd/frps.service Normal file
View File

@@ -0,0 +1,13 @@
[Unit]
Description=Frp Server Service
After=network.target
[Service]
Type=simple
User=nobody
Restart=on-failure
RestartSec=5s
ExecStart=/usr/bin/frps -c /etc/frp/frps.ini
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,13 @@
[Unit]
Description=Frp Server Service
After=network.target
[Service]
Type=simple
User=nobody
Restart=on-failure
RestartSec=5s
ExecStart=/usr/bin/frps -c /etc/frp/%i.ini
[Install]
WantedBy=multi-user.target

22
go.mod
View File

@@ -4,29 +4,29 @@ go 1.12
require ( require (
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
github.com/davecgh/go-spew v1.1.0 // indirect
github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb
github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049 github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049
github.com/fatedier/kcp-go v0.0.0-20171023144637-cd167d2f15f4 github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect github.com/golang/snappy v0.0.0-20170215233205-553a64147049 // indirect
github.com/gorilla/context v1.1.1 // indirect github.com/gorilla/mux v1.7.3
github.com/gorilla/mux v1.6.2 github.com/gorilla/websocket v1.4.0
github.com/gorilla/websocket v1.2.0 github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
github.com/hashicorp/yamux v0.0.0-20180314200745-2658be15c5f0
github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/klauspost/cpuid v1.2.0 // indirect
github.com/klauspost/reedsolomon v1.9.1 // indirect
github.com/mattn/go-runewidth v0.0.4 // indirect github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc
github.com/pkg/errors v0.8.0 // indirect github.com/pkg/errors v0.8.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rakyll/statik v0.1.1 github.com/rakyll/statik v0.1.1
github.com/rodaine/table v1.0.0 github.com/rodaine/table v1.0.0
github.com/spf13/cobra v0.0.3 github.com/spf13/cobra v0.0.3
github.com/spf13/pflag v1.0.1 // indirect github.com/spf13/pflag v1.0.1 // indirect
github.com/stretchr/testify v1.2.1 github.com/stretchr/testify v1.3.0
github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 // indirect github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 // indirect
github.com/templexxx/reedsolomon v0.0.0-20170926020725-5e06b81a1c76 // indirect
github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 // indirect github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 // indirect
github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 // indirect github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 // indirect
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec
golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab // indirect github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae // indirect
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80
golang.org/x/text v0.3.2 // indirect
) )

51
go.sum
View File

@@ -1,30 +1,61 @@
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb h1:wCrNShQidLmvVWn/0PikGmpdP0vtQmnvyRg3ZBEhczw=
github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb/go.mod h1:wx3gB6dbIfBRcucp94PI9Bt3I0F2c/MyNEWuhzpWiwk= github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb/go.mod h1:wx3gB6dbIfBRcucp94PI9Bt3I0F2c/MyNEWuhzpWiwk=
github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049 h1:teH578mf2ii42NHhIp3PhgvjU5bv+NFMq9fSQR8NaG8= github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049 h1:teH578mf2ii42NHhIp3PhgvjU5bv+NFMq9fSQR8NaG8=
github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049/go.mod h1:DqIrnl0rp3Zybg9zbJmozTy1n8fYJoX+QoAj9slIkKM= github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049/go.mod h1:DqIrnl0rp3Zybg9zbJmozTy1n8fYJoX+QoAj9slIkKM=
github.com/fatedier/kcp-go v0.0.0-20171023144637-cd167d2f15f4/go.mod h1:YpCOaxj7vvMThhIQ9AfTOPW2sfztQR5WDfs7AflSy4s= github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible h1:ssXat9YXFvigNge/IkkZvFMn8yeYKFX+uI6wn2mLJ74=
github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible/go.mod h1:YpCOaxj7vvMThhIQ9AfTOPW2sfztQR5WDfs7AflSy4s=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
github.com/gorilla/websocket v1.2.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/hashicorp/yamux v0.0.0-20180314200745-2658be15c5f0/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/reedsolomon v1.9.1 h1:kYrT1MlR4JH6PqOpC+okdb9CDTcwEC/BqpzK4WFyXL8=
github.com/klauspost/reedsolomon v1.9.1/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc h1:lNOt1SMsgHXTdpuGw+RpnJtzUcCb/oRKZP65pBy9pr8=
github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc/go.mod h1:6/gX3+E/IYGa0wMORlSMla999awQFdbaeQCHjSMKIzY=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rakyll/statik v0.1.1 h1:fCLHsIMajHqD5RKigbFXpvX3dN7c80Pm12+NCrI3kvg=
github.com/rakyll/statik v0.1.1/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= github.com/rakyll/statik v0.1.1/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
github.com/rodaine/table v1.0.0 h1:UaCJG5Axc/cNXVGXqnCrffm1KxP0OfYLe1HuJLf5sFY=
github.com/rodaine/table v1.0.0/go.mod h1:YAUzwPOji0DUJNEvggdxyQcUAl4g3hDRcFlyjnnR51I= github.com/rodaine/table v1.0.0/go.mod h1:YAUzwPOji0DUJNEvggdxyQcUAl4g3hDRcFlyjnnR51I=
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 h1:K+jtWCOuZgCra7eXZ/VWn2FbJmrA/D058mTXhh2rq+8=
github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU= github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU=
github.com/templexxx/reedsolomon v0.0.0-20170926020725-5e06b81a1c76/go.mod h1:ToWcj2sZ6xHl14JjZiVDktYpFtrFZJXBlsu7TV23lNg= github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 h1:pexgSe+JCFuxG+uoMZLO+ce8KHtdHGhst4cs6rw3gmk=
github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4= github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4=
github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 h1:6CNSDqI1wiE+JqyOy5Qt/yo/DoNI2/QmmOZeiCid2Nw=
github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc= github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw= github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
golang.org/x/net v0.0.0-20180524181706-dfa909b99c79/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=

View File

@@ -44,6 +44,7 @@ type ClientCommonConf struct {
LoginFailExit bool `json:"login_fail_exit"` LoginFailExit bool `json:"login_fail_exit"`
Start map[string]struct{} `json:"start"` Start map[string]struct{} `json:"start"`
Protocol string `json:"protocol"` Protocol string `json:"protocol"`
TLSEnable bool `json:"tls_enable"`
HeartBeatInterval int64 `json:"heartbeat_interval"` HeartBeatInterval int64 `json:"heartbeat_interval"`
HeartBeatTimeout int64 `json:"heartbeat_timeout"` HeartBeatTimeout int64 `json:"heartbeat_timeout"`
} }
@@ -69,6 +70,7 @@ func GetDefaultClientConf() *ClientCommonConf {
LoginFailExit: true, LoginFailExit: true,
Start: make(map[string]struct{}), Start: make(map[string]struct{}),
Protocol: "tcp", Protocol: "tcp",
TLSEnable: false,
HeartBeatInterval: 30, HeartBeatInterval: 30,
HeartBeatTimeout: 90, HeartBeatTimeout: 90,
} }
@@ -194,6 +196,12 @@ func UnmarshalClientConfFromIni(defaultCfg *ClientCommonConf, content string) (c
cfg.Protocol = tmpStr cfg.Protocol = tmpStr
} }
if tmpStr, ok = conf.Get("common", "tls_enable"); ok && tmpStr == "true" {
cfg.TLSEnable = true
} else {
cfg.TLSEnable = false
}
if tmpStr, ok = conf.Get("common", "heartbeat_timeout"); ok { if tmpStr, ok = conf.Get("common", "heartbeat_timeout"); ok {
if v, err = strconv.ParseInt(tmpStr, 10, 64); err != nil { if v, err = strconv.ParseInt(tmpStr, 10, 64); err != nil {
err = fmt.Errorf("Parse conf error: invalid heartbeat_timeout") err = fmt.Errorf("Parse conf error: invalid heartbeat_timeout")

View File

@@ -107,8 +107,10 @@ type BaseProxyConf struct {
Group string `json:"group"` Group string `json:"group"`
GroupKey string `json:"group_key"` GroupKey string `json:"group_key"`
// only used for client
ProxyProtocolVersion string `json:"proxy_protocol_version"`
LocalSvrConf LocalSvrConf
HealthCheckConf // only used for client HealthCheckConf
} }
func (cfg *BaseProxyConf) GetBaseInfo() *BaseProxyConf { func (cfg *BaseProxyConf) GetBaseInfo() *BaseProxyConf {
@@ -121,7 +123,8 @@ func (cfg *BaseProxyConf) compare(cmp *BaseProxyConf) bool {
cfg.UseEncryption != cmp.UseEncryption || cfg.UseEncryption != cmp.UseEncryption ||
cfg.UseCompression != cmp.UseCompression || cfg.UseCompression != cmp.UseCompression ||
cfg.Group != cmp.Group || cfg.Group != cmp.Group ||
cfg.GroupKey != cmp.GroupKey { cfg.GroupKey != cmp.GroupKey ||
cfg.ProxyProtocolVersion != cmp.ProxyProtocolVersion {
return false return false
} }
if !cfg.LocalSvrConf.compare(&cmp.LocalSvrConf) { if !cfg.LocalSvrConf.compare(&cmp.LocalSvrConf) {
@@ -162,6 +165,7 @@ func (cfg *BaseProxyConf) UnmarshalFromIni(prefix string, name string, section i
cfg.Group = section["group"] cfg.Group = section["group"]
cfg.GroupKey = section["group_key"] cfg.GroupKey = section["group_key"]
cfg.ProxyProtocolVersion = section["proxy_protocol_version"]
if err := cfg.LocalSvrConf.UnmarshalFromIni(prefix, name, section); err != nil { if err := cfg.LocalSvrConf.UnmarshalFromIni(prefix, name, section); err != nil {
return err return err
@@ -194,6 +198,12 @@ func (cfg *BaseProxyConf) MarshalToMsg(pMsg *msg.NewProxy) {
} }
func (cfg *BaseProxyConf) checkForCli() (err error) { func (cfg *BaseProxyConf) checkForCli() (err error) {
if cfg.ProxyProtocolVersion != "" {
if cfg.ProxyProtocolVersion != "v1" && cfg.ProxyProtocolVersion != "v2" {
return fmt.Errorf("no support proxy protocol version: %s", cfg.ProxyProtocolVersion)
}
}
if err = cfg.LocalSvrConf.checkForCli(); err != nil { if err = cfg.LocalSvrConf.checkForCli(); err != nil {
return return
} }

View File

@@ -69,6 +69,7 @@ type ServerCommonConf struct {
Token string `json:"token"` Token string `json:"token"`
SubDomainHost string `json:"subdomain_host"` SubDomainHost string `json:"subdomain_host"`
TcpMux bool `json:"tcp_mux"` TcpMux bool `json:"tcp_mux"`
Custom404Page string `json:"custom_404_page"`
AllowPorts map[int]struct{} AllowPorts map[int]struct{}
MaxPoolCount int64 `json:"max_pool_count"` MaxPoolCount int64 `json:"max_pool_count"`
@@ -104,6 +105,7 @@ func GetDefaultServerConf() *ServerCommonConf {
MaxPortsPerClient: 0, MaxPortsPerClient: 0,
HeartBeatTimeout: 90, HeartBeatTimeout: 90,
UserConnTimeout: 10, UserConnTimeout: 10,
Custom404Page: "",
} }
} }
@@ -293,6 +295,10 @@ func UnmarshalServerConfFromIni(defaultCfg *ServerCommonConf, content string) (c
cfg.TcpMux = true cfg.TcpMux = true
} }
if tmpStr, ok = conf.Get("common", "custom_404_page"); ok {
cfg.Custom404Page = tmpStr
}
if tmpStr, ok = conf.Get("common", "heartbeat_timeout"); ok { if tmpStr, ok = conf.Get("common", "heartbeat_timeout"); ok {
v, errRet := strconv.ParseInt(tmpStr, 10, 64) v, errRet := strconv.ParseInt(tmpStr, 10, 64)
if errRet != nil { if errRet != nil {

View File

@@ -17,44 +17,46 @@ package msg
import "net" import "net"
const ( const (
TypeLogin = 'o' TypeLogin = 'o'
TypeLoginResp = '1' TypeLoginResp = '1'
TypeNewProxy = 'p' TypeNewProxy = 'p'
TypeNewProxyResp = '2' TypeNewProxyResp = '2'
TypeCloseProxy = 'c' TypeCloseProxy = 'c'
TypeNewWorkConn = 'w' TypeNewWorkConn = 'w'
TypeReqWorkConn = 'r' TypeReqWorkConn = 'r'
TypeStartWorkConn = 's' TypeStartWorkConn = 's'
TypeNewVisitorConn = 'v' TypeNewVisitorConn = 'v'
TypeNewVisitorConnResp = '3' TypeNewVisitorConnResp = '3'
TypePing = 'h' TypePing = 'h'
TypePong = '4' TypePong = '4'
TypeUdpPacket = 'u' TypeUdpPacket = 'u'
TypeNatHoleVisitor = 'i' TypeNatHoleVisitor = 'i'
TypeNatHoleClient = 'n' TypeNatHoleClient = 'n'
TypeNatHoleResp = 'm' TypeNatHoleResp = 'm'
TypeNatHoleSid = '5' TypeNatHoleClientDetectOK = 'd'
TypeNatHoleSid = '5'
) )
var ( var (
msgTypeMap = map[byte]interface{}{ msgTypeMap = map[byte]interface{}{
TypeLogin: Login{}, TypeLogin: Login{},
TypeLoginResp: LoginResp{}, TypeLoginResp: LoginResp{},
TypeNewProxy: NewProxy{}, TypeNewProxy: NewProxy{},
TypeNewProxyResp: NewProxyResp{}, TypeNewProxyResp: NewProxyResp{},
TypeCloseProxy: CloseProxy{}, TypeCloseProxy: CloseProxy{},
TypeNewWorkConn: NewWorkConn{}, TypeNewWorkConn: NewWorkConn{},
TypeReqWorkConn: ReqWorkConn{}, TypeReqWorkConn: ReqWorkConn{},
TypeStartWorkConn: StartWorkConn{}, TypeStartWorkConn: StartWorkConn{},
TypeNewVisitorConn: NewVisitorConn{}, TypeNewVisitorConn: NewVisitorConn{},
TypeNewVisitorConnResp: NewVisitorConnResp{}, TypeNewVisitorConnResp: NewVisitorConnResp{},
TypePing: Ping{}, TypePing: Ping{},
TypePong: Pong{}, TypePong: Pong{},
TypeUdpPacket: UdpPacket{}, TypeUdpPacket: UdpPacket{},
TypeNatHoleVisitor: NatHoleVisitor{}, TypeNatHoleVisitor: NatHoleVisitor{},
TypeNatHoleClient: NatHoleClient{}, TypeNatHoleClient: NatHoleClient{},
TypeNatHoleResp: NatHoleResp{}, TypeNatHoleResp: NatHoleResp{},
TypeNatHoleSid: NatHoleSid{}, TypeNatHoleClientDetectOK: NatHoleClientDetectOK{},
TypeNatHoleSid: NatHoleSid{},
} }
) )
@@ -124,6 +126,10 @@ type ReqWorkConn struct {
type StartWorkConn struct { type StartWorkConn struct {
ProxyName string `json:"proxy_name"` ProxyName string `json:"proxy_name"`
SrcAddr string `json:"src_addr"`
DstAddr string `json:"dst_addr"`
SrcPort uint16 `json:"src_port"`
DstPort uint16 `json:"dst_port"`
} }
type NewVisitorConn struct { type NewVisitorConn struct {
@@ -169,6 +175,9 @@ type NatHoleResp struct {
Error string `json:"error"` Error string `json:"error"`
} }
type NatHoleClientDetectOK struct {
}
type NatHoleSid struct { type NatHoleSid struct {
Sid string `json:"sid"` Sid string `json:"sid"`
} }

View File

@@ -18,6 +18,11 @@ import (
// Timeout seconds. // Timeout seconds.
var NatHoleTimeout int64 = 10 var NatHoleTimeout int64 = 10
type SidRequest struct {
Sid string
NotifyCh chan struct{}
}
type NatHoleController struct { type NatHoleController struct {
listener *net.UDPConn listener *net.UDPConn
@@ -44,11 +49,11 @@ func NewNatHoleController(udpBindAddr string) (nc *NatHoleController, err error)
return nc, nil return nc, nil
} }
func (nc *NatHoleController) ListenClient(name string, sk string) (sidCh chan string) { func (nc *NatHoleController) ListenClient(name string, sk string) (sidCh chan *SidRequest) {
clientCfg := &NatHoleClientCfg{ clientCfg := &NatHoleClientCfg{
Name: name, Name: name,
Sk: sk, Sk: sk,
SidCh: make(chan string), SidCh: make(chan *SidRequest),
} }
nc.mu.Lock() nc.mu.Lock()
nc.clientCfgs[name] = clientCfg nc.clientCfgs[name] = clientCfg
@@ -132,7 +137,10 @@ func (nc *NatHoleController) HandleVisitor(m *msg.NatHoleVisitor, raddr *net.UDP
}() }()
err := errors.PanicToError(func() { err := errors.PanicToError(func() {
clientCfg.SidCh <- sid clientCfg.SidCh <- &SidRequest{
Sid: sid,
NotifyCh: session.NotifyCh,
}
}) })
if err != nil { if err != nil {
return return
@@ -158,7 +166,6 @@ func (nc *NatHoleController) HandleClient(m *msg.NatHoleClient, raddr *net.UDPAd
} }
log.Trace("handle client message, sid [%s]", session.Sid) log.Trace("handle client message, sid [%s]", session.Sid)
session.ClientAddr = raddr session.ClientAddr = raddr
session.NotifyCh <- struct{}{}
resp := nc.GenNatHoleResponse(session, "") resp := nc.GenNatHoleResponse(session, "")
log.Trace("send nat hole response to client") log.Trace("send nat hole response to client")
@@ -201,5 +208,5 @@ type NatHoleSession struct {
type NatHoleClientCfg struct { type NatHoleClientCfg struct {
Name string Name string
Sk string Sk string
SidCh chan string SidCh chan *SidRequest
} }

View File

@@ -64,7 +64,7 @@ func (hp *HttpProxy) Name() string {
return PluginHttpProxy return PluginHttpProxy
} }
func (hp *HttpProxy) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn) { func (hp *HttpProxy) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn, extraBufToLocal []byte) {
wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn) wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn)
sc, rd := gnet.NewSharedConn(wrapConn) sc, rd := gnet.NewSharedConn(wrapConn)

114
models/plugin/https2http.go Normal file
View File

@@ -0,0 +1,114 @@
// Copyright 2019 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package plugin
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"net/http/httputil"
frpNet "github.com/fatedier/frp/utils/net"
)
const PluginHTTPS2HTTP = "https2http"
func init() {
Register(PluginHTTPS2HTTP, NewHTTPS2HTTPPlugin)
}
type HTTPS2HTTPPlugin struct {
crtPath string
keyPath string
hostHeaderRewrite string
localAddr string
l *Listener
s *http.Server
}
func NewHTTPS2HTTPPlugin(params map[string]string) (Plugin, error) {
crtPath := params["plugin_crt_path"]
keyPath := params["plugin_key_path"]
localAddr := params["plugin_local_addr"]
hostHeaderRewrite := params["plugin_host_header_rewrite"]
if crtPath == "" {
return nil, fmt.Errorf("plugin_crt_path is required")
}
if keyPath == "" {
return nil, fmt.Errorf("plugin_key_path is required")
}
if localAddr == "" {
return nil, fmt.Errorf("plugin_local_addr is required")
}
listener := NewProxyListener()
p := &HTTPS2HTTPPlugin{
crtPath: crtPath,
keyPath: keyPath,
localAddr: localAddr,
hostHeaderRewrite: hostHeaderRewrite,
l: listener,
}
rp := &httputil.ReverseProxy{
Director: func(req *http.Request) {
req.URL.Scheme = "http"
req.URL.Host = p.localAddr
if p.hostHeaderRewrite != "" {
req.Host = p.hostHeaderRewrite
}
},
}
p.s = &http.Server{
Handler: rp,
}
tlsConfig, err := p.genTLSConfig()
if err != nil {
return nil, fmt.Errorf("gen TLS config error: %v", err)
}
ln := tls.NewListener(listener, tlsConfig)
go p.s.Serve(ln)
return p, nil
}
func (p *HTTPS2HTTPPlugin) genTLSConfig() (*tls.Config, error) {
cert, err := tls.LoadX509KeyPair(p.crtPath, p.keyPath)
if err != nil {
return nil, err
}
config := &tls.Config{Certificates: []tls.Certificate{cert}}
return config, nil
}
func (p *HTTPS2HTTPPlugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn, extraBufToLocal []byte) {
wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn)
p.l.PutConn(wrapConn)
}
func (p *HTTPS2HTTPPlugin) Name() string {
return PluginHTTPS2HTTP
}
func (p *HTTPS2HTTPPlugin) Close() error {
return nil
}

View File

@@ -46,7 +46,7 @@ func Create(name string, params map[string]string) (p Plugin, err error) {
type Plugin interface { type Plugin interface {
Name() string Name() string
Handle(conn io.ReadWriteCloser, realConn frpNet.Conn) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn, extraBufToLocal []byte)
Close() error Close() error
} }

View File

@@ -53,7 +53,7 @@ func NewSocks5Plugin(params map[string]string) (p Plugin, err error) {
return return
} }
func (sp *Socks5Plugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn) { func (sp *Socks5Plugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn, extraBufToLocal []byte) {
defer conn.Close() defer conn.Close()
wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn) wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn)
sp.Server.ServeConn(wrapConn) sp.Server.ServeConn(wrapConn)

View File

@@ -72,7 +72,7 @@ func NewStaticFilePlugin(params map[string]string) (Plugin, error) {
return sp, nil return sp, nil
} }
func (sp *StaticFilePlugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn) { func (sp *StaticFilePlugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn, extraBufToLocal []byte) {
wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn) wrapConn := frpNet.WrapReadWriteCloserToConn(conn, realConn)
sp.l.PutConn(wrapConn) sp.l.PutConn(wrapConn)
} }

View File

@@ -53,11 +53,14 @@ func NewUnixDomainSocketPlugin(params map[string]string) (p Plugin, err error) {
return return
} }
func (uds *UnixDomainSocketPlugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn) { func (uds *UnixDomainSocketPlugin) Handle(conn io.ReadWriteCloser, realConn frpNet.Conn, extraBufToLocal []byte) {
localConn, err := net.DialUnix("unix", nil, uds.UnixAddr) localConn, err := net.DialUnix("unix", nil, uds.UnixAddr)
if err != nil { if err != nil {
return return
} }
if len(extraBufToLocal) > 0 {
localConn.Write(extraBufToLocal)
}
frpIo.Join(localConn, conn) frpIo.Join(localConn, conn)
} }

View File

@@ -44,7 +44,7 @@ for os in $os_all; do
mv ./frps_${os}_${arch} ${frp_path}/frps mv ./frps_${os}_${arch} ${frp_path}/frps
fi fi
cp ./LICENSE ${frp_path} cp ./LICENSE ${frp_path}
cp ./conf/* ${frp_path} cp -rf ./conf/* ${frp_path}
# packages # packages
cd ./packages cd ./packages

View File

@@ -301,6 +301,7 @@ func (ctl *Control) reader() {
return return
} else { } else {
ctl.conn.Warn("read error: %v", err) ctl.conn.Warn("read error: %v", err)
ctl.conn.Close()
return return
} }
} else { } else {

View File

@@ -29,6 +29,9 @@ type ResourceController struct {
// Tcp Group Controller // Tcp Group Controller
TcpGroupCtl *group.TcpGroupCtl TcpGroupCtl *group.TcpGroupCtl
// HTTP Group Controller
HTTPGroupCtl *group.HTTPGroupController
// Manage all tcp ports // Manage all tcp ports
TcpPortManager *ports.PortManager TcpPortManager *ports.PortManager
@@ -38,7 +41,7 @@ type ResourceController struct {
// For http proxies, forwarding http requests // For http proxies, forwarding http requests
HttpReverseProxy *vhost.HttpReverseProxy HttpReverseProxy *vhost.HttpReverseProxy
// For https proxies, route requests to different clients by hostname and other infomation // For https proxies, route requests to different clients by hostname and other information
VhostHttpsMuxer *vhost.HttpsMuxer VhostHttpsMuxer *vhost.HttpsMuxer
// Controller for nat hole connections // Controller for nat hole connections

View File

@@ -279,6 +279,7 @@ func (svr *Service) getProxyStatsByTypeAndName(proxyType string, proxyName strin
proxyInfo.CurConns = ps.CurConns proxyInfo.CurConns = ps.CurConns
proxyInfo.LastStartTime = ps.LastStartTime proxyInfo.LastStartTime = ps.LastStartTime
proxyInfo.LastCloseTime = ps.LastCloseTime proxyInfo.LastCloseTime = ps.LastCloseTime
code = 200
} }
return return

View File

@@ -23,4 +23,5 @@ var (
ErrGroupParamsInvalid = errors.New("group params invalid") ErrGroupParamsInvalid = errors.New("group params invalid")
ErrListenerClosed = errors.New("group listener closed") ErrListenerClosed = errors.New("group listener closed")
ErrGroupDifferentPort = errors.New("group should have same remote port") ErrGroupDifferentPort = errors.New("group should have same remote port")
ErrProxyRepeated = errors.New("group proxy repeated")
) )

157
server/group/http.go Normal file
View File

@@ -0,0 +1,157 @@
package group
import (
"fmt"
"sync"
"sync/atomic"
frpNet "github.com/fatedier/frp/utils/net"
"github.com/fatedier/frp/utils/vhost"
)
type HTTPGroupController struct {
groups map[string]*HTTPGroup
vhostRouter *vhost.VhostRouters
mu sync.Mutex
}
func NewHTTPGroupController(vhostRouter *vhost.VhostRouters) *HTTPGroupController {
return &HTTPGroupController{
groups: make(map[string]*HTTPGroup),
vhostRouter: vhostRouter,
}
}
func (ctl *HTTPGroupController) Register(proxyName, group, groupKey string,
routeConfig vhost.VhostRouteConfig) (err error) {
indexKey := httpGroupIndex(group, routeConfig.Domain, routeConfig.Location)
ctl.mu.Lock()
g, ok := ctl.groups[indexKey]
if !ok {
g = NewHTTPGroup(ctl)
ctl.groups[indexKey] = g
}
ctl.mu.Unlock()
return g.Register(proxyName, group, groupKey, routeConfig)
}
func (ctl *HTTPGroupController) UnRegister(proxyName, group, domain, location string) {
indexKey := httpGroupIndex(group, domain, location)
ctl.mu.Lock()
defer ctl.mu.Unlock()
g, ok := ctl.groups[indexKey]
if !ok {
return
}
isEmpty := g.UnRegister(proxyName)
if isEmpty {
delete(ctl.groups, indexKey)
}
}
type HTTPGroup struct {
group string
groupKey string
domain string
location string
createFuncs map[string]vhost.CreateConnFunc
pxyNames []string
index uint64
ctl *HTTPGroupController
mu sync.RWMutex
}
func NewHTTPGroup(ctl *HTTPGroupController) *HTTPGroup {
return &HTTPGroup{
createFuncs: make(map[string]vhost.CreateConnFunc),
pxyNames: make([]string, 0),
ctl: ctl,
}
}
func (g *HTTPGroup) Register(proxyName, group, groupKey string,
routeConfig vhost.VhostRouteConfig) (err error) {
g.mu.Lock()
defer g.mu.Unlock()
if len(g.createFuncs) == 0 {
// the first proxy in this group
tmp := routeConfig // copy object
tmp.CreateConnFn = g.createConn
err = g.ctl.vhostRouter.Add(routeConfig.Domain, routeConfig.Location, &tmp)
if err != nil {
return
}
g.group = group
g.groupKey = groupKey
g.domain = routeConfig.Domain
g.location = routeConfig.Location
} else {
if g.group != group || g.domain != routeConfig.Domain || g.location != routeConfig.Location {
err = ErrGroupParamsInvalid
return
}
if g.groupKey != groupKey {
err = ErrGroupAuthFailed
return
}
}
if _, ok := g.createFuncs[proxyName]; ok {
err = ErrProxyRepeated
return
}
g.createFuncs[proxyName] = routeConfig.CreateConnFn
g.pxyNames = append(g.pxyNames, proxyName)
return nil
}
func (g *HTTPGroup) UnRegister(proxyName string) (isEmpty bool) {
g.mu.Lock()
defer g.mu.Unlock()
delete(g.createFuncs, proxyName)
for i, name := range g.pxyNames {
if name == proxyName {
g.pxyNames = append(g.pxyNames[:i], g.pxyNames[i+1:]...)
break
}
}
if len(g.createFuncs) == 0 {
isEmpty = true
g.ctl.vhostRouter.Del(g.domain, g.location)
}
return
}
func (g *HTTPGroup) createConn(remoteAddr string) (frpNet.Conn, error) {
var f vhost.CreateConnFunc
newIndex := atomic.AddUint64(&g.index, 1)
g.mu.RLock()
group := g.group
domain := g.domain
location := g.location
if len(g.pxyNames) > 0 {
name := g.pxyNames[int(newIndex)%len(g.pxyNames)]
f, _ = g.createFuncs[name]
}
g.mu.RUnlock()
if f == nil {
return nil, fmt.Errorf("no CreateConnFunc for http group [%s], domain [%s], location [%s]", group, domain, location)
}
return f(remoteAddr)
}
func httpGroupIndex(group, domain, location string) string {
return fmt.Sprintf("%s_%s_%s", group, domain, location)
}

View File

@@ -24,46 +24,47 @@ import (
gerr "github.com/fatedier/golib/errors" gerr "github.com/fatedier/golib/errors"
) )
type TcpGroupListener struct { // TcpGroupCtl manage all TcpGroups
groupName string type TcpGroupCtl struct {
group *TcpGroup groups map[string]*TcpGroup
addr net.Addr // portManager is used to manage port
closeCh chan struct{} portManager *ports.PortManager
mu sync.Mutex
} }
func newTcpGroupListener(name string, group *TcpGroup, addr net.Addr) *TcpGroupListener { // NewTcpGroupCtl return a new TcpGroupCtl
return &TcpGroupListener{ func NewTcpGroupCtl(portManager *ports.PortManager) *TcpGroupCtl {
groupName: name, return &TcpGroupCtl{
group: group, groups: make(map[string]*TcpGroup),
addr: addr, portManager: portManager,
closeCh: make(chan struct{}),
} }
} }
func (ln *TcpGroupListener) Accept() (c net.Conn, err error) { // Listen is the wrapper for TcpGroup's Listen
var ok bool // If there are no group, we will create one here
select { func (tgc *TcpGroupCtl) Listen(proxyName string, group string, groupKey string,
case <-ln.closeCh: addr string, port int) (l net.Listener, realPort int, err error) {
return nil, ErrListenerClosed
case c, ok = <-ln.group.Accept(): tgc.mu.Lock()
if !ok { tcpGroup, ok := tgc.groups[group]
return nil, ErrListenerClosed if !ok {
} tcpGroup = NewTcpGroup(tgc)
return c, nil tgc.groups[group] = tcpGroup
} }
tgc.mu.Unlock()
return tcpGroup.Listen(proxyName, group, groupKey, addr, port)
} }
func (ln *TcpGroupListener) Addr() net.Addr { // RemoveGroup remove TcpGroup from controller
return ln.addr func (tgc *TcpGroupCtl) RemoveGroup(group string) {
} tgc.mu.Lock()
defer tgc.mu.Unlock()
func (ln *TcpGroupListener) Close() (err error) { delete(tgc.groups, group)
close(ln.closeCh)
ln.group.CloseListener(ln)
return
} }
// TcpGroup route connections to different proxies
type TcpGroup struct { type TcpGroup struct {
group string group string
groupKey string groupKey string
@@ -79,6 +80,7 @@ type TcpGroup struct {
mu sync.Mutex mu sync.Mutex
} }
// NewTcpGroup return a new TcpGroup
func NewTcpGroup(ctl *TcpGroupCtl) *TcpGroup { func NewTcpGroup(ctl *TcpGroupCtl) *TcpGroup {
return &TcpGroup{ return &TcpGroup{
lns: make([]*TcpGroupListener, 0), lns: make([]*TcpGroupListener, 0),
@@ -87,10 +89,14 @@ func NewTcpGroup(ctl *TcpGroupCtl) *TcpGroup {
} }
} }
// Listen will return a new TcpGroupListener
// if TcpGroup already has a listener, just add a new TcpGroupListener to the queues
// otherwise, listen on the real address
func (tg *TcpGroup) Listen(proxyName string, group string, groupKey string, addr string, port int) (ln *TcpGroupListener, realPort int, err error) { func (tg *TcpGroup) Listen(proxyName string, group string, groupKey string, addr string, port int) (ln *TcpGroupListener, realPort int, err error) {
tg.mu.Lock() tg.mu.Lock()
defer tg.mu.Unlock() defer tg.mu.Unlock()
if len(tg.lns) == 0 { if len(tg.lns) == 0 {
// the first listener, listen on the real address
realPort, err = tg.ctl.portManager.Acquire(proxyName, port) realPort, err = tg.ctl.portManager.Acquire(proxyName, port)
if err != nil { if err != nil {
return return
@@ -114,6 +120,7 @@ func (tg *TcpGroup) Listen(proxyName string, group string, groupKey string, addr
} }
go tg.worker() go tg.worker()
} else { } else {
// address and port in the same group must be equal
if tg.group != group || tg.addr != addr { if tg.group != group || tg.addr != addr {
err = ErrGroupParamsInvalid err = ErrGroupParamsInvalid
return return
@@ -133,6 +140,7 @@ func (tg *TcpGroup) Listen(proxyName string, group string, groupKey string, addr
return return
} }
// worker is called when the real tcp listener has been created
func (tg *TcpGroup) worker() { func (tg *TcpGroup) worker() {
for { for {
c, err := tg.tcpLn.Accept() c, err := tg.tcpLn.Accept()
@@ -152,6 +160,7 @@ func (tg *TcpGroup) Accept() <-chan net.Conn {
return tg.acceptCh return tg.acceptCh
} }
// CloseListener remove the TcpGroupListener from the TcpGroup
func (tg *TcpGroup) CloseListener(ln *TcpGroupListener) { func (tg *TcpGroup) CloseListener(ln *TcpGroupListener) {
tg.mu.Lock() tg.mu.Lock()
defer tg.mu.Unlock() defer tg.mu.Unlock()
@@ -169,36 +178,47 @@ func (tg *TcpGroup) CloseListener(ln *TcpGroupListener) {
} }
} }
type TcpGroupCtl struct { // TcpGroupListener
groups map[string]*TcpGroup type TcpGroupListener struct {
groupName string
group *TcpGroup
portManager *ports.PortManager addr net.Addr
mu sync.Mutex closeCh chan struct{}
} }
func NewTcpGroupCtl(portManager *ports.PortManager) *TcpGroupCtl { func newTcpGroupListener(name string, group *TcpGroup, addr net.Addr) *TcpGroupListener {
return &TcpGroupCtl{ return &TcpGroupListener{
groups: make(map[string]*TcpGroup), groupName: name,
portManager: portManager, group: group,
addr: addr,
closeCh: make(chan struct{}),
} }
} }
func (tgc *TcpGroupCtl) Listen(proxyNanme string, group string, groupKey string, // Accept will accept connections from TcpGroup
addr string, port int) (l net.Listener, realPort int, err error) { func (ln *TcpGroupListener) Accept() (c net.Conn, err error) {
var ok bool
tgc.mu.Lock() select {
defer tgc.mu.Unlock() case <-ln.closeCh:
if tcpGroup, ok := tgc.groups[group]; ok { return nil, ErrListenerClosed
return tcpGroup.Listen(proxyNanme, group, groupKey, addr, port) case c, ok = <-ln.group.Accept():
} else { if !ok {
tcpGroup = NewTcpGroup(tgc) return nil, ErrListenerClosed
tgc.groups[group] = tcpGroup }
return tcpGroup.Listen(proxyNanme, group, groupKey, addr, port) return c, nil
} }
} }
func (tgc *TcpGroupCtl) RemoveGroup(group string) { func (ln *TcpGroupListener) Addr() net.Addr {
tgc.mu.Lock() return ln.addr
defer tgc.mu.Unlock() }
delete(tgc.groups, group)
// Close close the listener
func (ln *TcpGroupListener) Close() (err error) {
close(ln.closeCh)
// remove self from TcpGroup
ln.group.CloseListener(ln)
return
} }

View File

@@ -16,6 +16,7 @@ package proxy
import ( import (
"io" "io"
"net"
"strings" "strings"
"github.com/fatedier/frp/g" "github.com/fatedier/frp/g"
@@ -49,22 +50,46 @@ func (pxy *HttpProxy) Run() (remoteAddr string, err error) {
locations = []string{""} locations = []string{""}
} }
defer func() {
if err != nil {
pxy.Close()
}
}()
addrs := make([]string, 0) addrs := make([]string, 0)
for _, domain := range pxy.cfg.CustomDomains { for _, domain := range pxy.cfg.CustomDomains {
if domain == "" {
continue
}
routeConfig.Domain = domain routeConfig.Domain = domain
for _, location := range locations { for _, location := range locations {
routeConfig.Location = location routeConfig.Location = location
err = pxy.rc.HttpReverseProxy.Register(routeConfig)
if err != nil {
return
}
tmpDomain := routeConfig.Domain tmpDomain := routeConfig.Domain
tmpLocation := routeConfig.Location tmpLocation := routeConfig.Location
addrs = append(addrs, util.CanonicalAddr(tmpDomain, int(g.GlbServerCfg.VhostHttpPort)))
pxy.closeFuncs = append(pxy.closeFuncs, func() { // handle group
pxy.rc.HttpReverseProxy.UnRegister(tmpDomain, tmpLocation) if pxy.cfg.Group != "" {
}) err = pxy.rc.HTTPGroupCtl.Register(pxy.name, pxy.cfg.Group, pxy.cfg.GroupKey, routeConfig)
pxy.Info("http proxy listen for host [%s] location [%s]", routeConfig.Domain, routeConfig.Location) if err != nil {
return
}
pxy.closeFuncs = append(pxy.closeFuncs, func() {
pxy.rc.HTTPGroupCtl.UnRegister(pxy.name, pxy.cfg.Group, tmpDomain, tmpLocation)
})
} else {
// no group
err = pxy.rc.HttpReverseProxy.Register(routeConfig)
if err != nil {
return
}
pxy.closeFuncs = append(pxy.closeFuncs, func() {
pxy.rc.HttpReverseProxy.UnRegister(tmpDomain, tmpLocation)
})
}
addrs = append(addrs, util.CanonicalAddr(routeConfig.Domain, int(g.GlbServerCfg.VhostHttpPort)))
pxy.Info("http proxy listen for host [%s] location [%s] group [%s]", routeConfig.Domain, routeConfig.Location, pxy.cfg.Group)
} }
} }
@@ -72,17 +97,31 @@ func (pxy *HttpProxy) Run() (remoteAddr string, err error) {
routeConfig.Domain = pxy.cfg.SubDomain + "." + g.GlbServerCfg.SubDomainHost routeConfig.Domain = pxy.cfg.SubDomain + "." + g.GlbServerCfg.SubDomainHost
for _, location := range locations { for _, location := range locations {
routeConfig.Location = location routeConfig.Location = location
err = pxy.rc.HttpReverseProxy.Register(routeConfig)
if err != nil {
return
}
tmpDomain := routeConfig.Domain tmpDomain := routeConfig.Domain
tmpLocation := routeConfig.Location tmpLocation := routeConfig.Location
// handle group
if pxy.cfg.Group != "" {
err = pxy.rc.HTTPGroupCtl.Register(pxy.name, pxy.cfg.Group, pxy.cfg.GroupKey, routeConfig)
if err != nil {
return
}
pxy.closeFuncs = append(pxy.closeFuncs, func() {
pxy.rc.HTTPGroupCtl.UnRegister(pxy.name, pxy.cfg.Group, tmpDomain, tmpLocation)
})
} else {
err = pxy.rc.HttpReverseProxy.Register(routeConfig)
if err != nil {
return
}
pxy.closeFuncs = append(pxy.closeFuncs, func() {
pxy.rc.HttpReverseProxy.UnRegister(tmpDomain, tmpLocation)
})
}
addrs = append(addrs, util.CanonicalAddr(tmpDomain, g.GlbServerCfg.VhostHttpPort)) addrs = append(addrs, util.CanonicalAddr(tmpDomain, g.GlbServerCfg.VhostHttpPort))
pxy.closeFuncs = append(pxy.closeFuncs, func() {
pxy.rc.HttpReverseProxy.UnRegister(tmpDomain, tmpLocation) pxy.Info("http proxy listen for host [%s] location [%s] group [%s]", routeConfig.Domain, routeConfig.Location, pxy.cfg.Group)
})
pxy.Info("http proxy listen for host [%s] location [%s]", routeConfig.Domain, routeConfig.Location)
} }
} }
remoteAddr = strings.Join(addrs, ",") remoteAddr = strings.Join(addrs, ",")
@@ -93,8 +132,14 @@ func (pxy *HttpProxy) GetConf() config.ProxyConf {
return pxy.cfg return pxy.cfg
} }
func (pxy *HttpProxy) GetRealConn() (workConn frpNet.Conn, err error) { func (pxy *HttpProxy) GetRealConn(remoteAddr string) (workConn frpNet.Conn, err error) {
tmpConn, errRet := pxy.GetWorkConnFromPool() rAddr, errRet := net.ResolveTCPAddr("tcp", remoteAddr)
if errRet != nil {
pxy.Warn("resolve TCP addr [%s] error: %v", remoteAddr, errRet)
// we do not return error here since remoteAddr is not necessary for proxies without proxy protocol enabled
}
tmpConn, errRet := pxy.GetWorkConnFromPool(rAddr, nil)
if errRet != nil { if errRet != nil {
err = errRet err = errRet
return return

View File

@@ -31,8 +31,17 @@ type HttpsProxy struct {
func (pxy *HttpsProxy) Run() (remoteAddr string, err error) { func (pxy *HttpsProxy) Run() (remoteAddr string, err error) {
routeConfig := &vhost.VhostRouteConfig{} routeConfig := &vhost.VhostRouteConfig{}
defer func() {
if err != nil {
pxy.Close()
}
}()
addrs := make([]string, 0) addrs := make([]string, 0)
for _, domain := range pxy.cfg.CustomDomains { for _, domain := range pxy.cfg.CustomDomains {
if domain == "" {
continue
}
routeConfig.Domain = domain routeConfig.Domain = domain
l, errRet := pxy.rc.VhostHttpsMuxer.Listen(routeConfig) l, errRet := pxy.rc.VhostHttpsMuxer.Listen(routeConfig)
if errRet != nil { if errRet != nil {

View File

@@ -17,6 +17,8 @@ package proxy
import ( import (
"fmt" "fmt"
"io" "io"
"net"
"strconv"
"sync" "sync"
"github.com/fatedier/frp/g" "github.com/fatedier/frp/g"
@@ -36,7 +38,7 @@ type Proxy interface {
Run() (remoteAddr string, err error) Run() (remoteAddr string, err error)
GetName() string GetName() string
GetConf() config.ProxyConf GetConf() config.ProxyConf
GetWorkConnFromPool() (workConn frpNet.Conn, err error) GetWorkConnFromPool(src, dst net.Addr) (workConn frpNet.Conn, err error)
GetUsedPortsNum() int GetUsedPortsNum() int
Close() Close()
log.Logger log.Logger
@@ -70,7 +72,9 @@ func (pxy *BaseProxy) Close() {
} }
} }
func (pxy *BaseProxy) GetWorkConnFromPool() (workConn frpNet.Conn, err error) { // GetWorkConnFromPool try to get a new work connections from pool
// for quickly response, we immediately send the StartWorkConn message to frpc after take out one from pool
func (pxy *BaseProxy) GetWorkConnFromPool(src, dst net.Addr) (workConn frpNet.Conn, err error) {
// try all connections from the pool // try all connections from the pool
for i := 0; i < pxy.poolCount+1; i++ { for i := 0; i < pxy.poolCount+1; i++ {
if workConn, err = pxy.getWorkConnFn(); err != nil { if workConn, err = pxy.getWorkConnFn(); err != nil {
@@ -80,8 +84,29 @@ func (pxy *BaseProxy) GetWorkConnFromPool() (workConn frpNet.Conn, err error) {
pxy.Info("get a new work connection: [%s]", workConn.RemoteAddr().String()) pxy.Info("get a new work connection: [%s]", workConn.RemoteAddr().String())
workConn.AddLogPrefix(pxy.GetName()) workConn.AddLogPrefix(pxy.GetName())
var (
srcAddr string
dstAddr string
srcPortStr string
dstPortStr string
srcPort int
dstPort int
)
if src != nil {
srcAddr, srcPortStr, _ = net.SplitHostPort(src.String())
srcPort, _ = strconv.Atoi(srcPortStr)
}
if dst != nil {
dstAddr, dstPortStr, _ = net.SplitHostPort(dst.String())
dstPort, _ = strconv.Atoi(dstPortStr)
}
err := msg.WriteMsg(workConn, &msg.StartWorkConn{ err := msg.WriteMsg(workConn, &msg.StartWorkConn{
ProxyName: pxy.GetName(), ProxyName: pxy.GetName(),
SrcAddr: srcAddr,
SrcPort: uint16(srcPort),
DstAddr: dstAddr,
DstPort: uint16(dstPort),
}) })
if err != nil { if err != nil {
workConn.Warn("failed to send message to work connection from pool: %v, times: %d", err, i) workConn.Warn("failed to send message to work connection from pool: %v, times: %d", err, i)
@@ -177,7 +202,7 @@ func HandleUserTcpConnection(pxy Proxy, userConn frpNet.Conn, statsCollector sta
defer userConn.Close() defer userConn.Close()
// try all connections from the pool // try all connections from the pool
workConn, err := pxy.GetWorkConnFromPool() workConn, err := pxy.GetWorkConnFromPool(userConn.RemoteAddr(), userConn.LocalAddr())
if err != nil { if err != nil {
return return
} }

View File

@@ -160,7 +160,7 @@ func (pxy *UdpProxy) Run() (remoteAddr string, err error) {
// Sleep a while for waiting control send the NewProxyResp to client. // Sleep a while for waiting control send the NewProxyResp to client.
time.Sleep(500 * time.Millisecond) time.Sleep(500 * time.Millisecond)
for { for {
workConn, err := pxy.GetWorkConnFromPool() workConn, err := pxy.GetWorkConnFromPool(nil, nil)
if err != nil { if err != nil {
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
// check if proxy is closed // check if proxy is closed

View File

@@ -42,18 +42,40 @@ func (pxy *XtcpProxy) Run() (remoteAddr string, err error) {
select { select {
case <-pxy.closeCh: case <-pxy.closeCh:
break break
case sid := <-sidCh: case sidRequest := <-sidCh:
workConn, errRet := pxy.GetWorkConnFromPool() sr := sidRequest
workConn, errRet := pxy.GetWorkConnFromPool(nil, nil)
if errRet != nil { if errRet != nil {
continue continue
} }
m := &msg.NatHoleSid{ m := &msg.NatHoleSid{
Sid: sid, Sid: sr.Sid,
} }
errRet = msg.WriteMsg(workConn, m) errRet = msg.WriteMsg(workConn, m)
if errRet != nil { if errRet != nil {
pxy.Warn("write nat hole sid package error, %v", errRet) pxy.Warn("write nat hole sid package error, %v", errRet)
workConn.Close()
break
} }
go func() {
raw, errRet := msg.ReadMsg(workConn)
if errRet != nil {
pxy.Warn("read nat hole client ok package error: %v", errRet)
workConn.Close()
return
}
if _, ok := raw.(*msg.NatHoleClientDetectOK); !ok {
pxy.Warn("read nat hole client ok package format error")
workConn.Close()
return
}
select {
case sr.NotifyCh <- struct{}{}:
default:
}
}()
} }
} }
}() }()

View File

@@ -16,8 +16,14 @@ package server
import ( import (
"bytes" "bytes"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"math/big"
"net" "net"
"net/http" "net/http"
"time" "time"
@@ -61,17 +67,25 @@ type Service struct {
// Accept connections using websocket // Accept connections using websocket
websocketListener frpNet.Listener websocketListener frpNet.Listener
// Accept frp tls connections
tlsListener frpNet.Listener
// Manage all controllers // Manage all controllers
ctlManager *ControlManager ctlManager *ControlManager
// Manage all proxies // Manage all proxies
pxyManager *proxy.ProxyManager pxyManager *proxy.ProxyManager
// HTTP vhost router
httpVhostRouter *vhost.VhostRouters
// All resource managers and controllers // All resource managers and controllers
rc *controller.ResourceController rc *controller.ResourceController
// stats collector to store server and proxies stats info // stats collector to store server and proxies stats info
statsCollector stats.Collector statsCollector stats.Collector
tlsConfig *tls.Config
} }
func NewService() (svr *Service, err error) { func NewService() (svr *Service, err error) {
@@ -84,11 +98,16 @@ func NewService() (svr *Service, err error) {
TcpPortManager: ports.NewPortManager("tcp", cfg.ProxyBindAddr, cfg.AllowPorts), TcpPortManager: ports.NewPortManager("tcp", cfg.ProxyBindAddr, cfg.AllowPorts),
UdpPortManager: ports.NewPortManager("udp", cfg.ProxyBindAddr, cfg.AllowPorts), UdpPortManager: ports.NewPortManager("udp", cfg.ProxyBindAddr, cfg.AllowPorts),
}, },
httpVhostRouter: vhost.NewVhostRouters(),
tlsConfig: generateTLSConfig(),
} }
// Init group controller // Init group controller
svr.rc.TcpGroupCtl = group.NewTcpGroupCtl(svr.rc.TcpPortManager) svr.rc.TcpGroupCtl = group.NewTcpGroupCtl(svr.rc.TcpPortManager)
// Init HTTP group controller
svr.rc.HTTPGroupCtl = group.NewHTTPGroupController(svr.httpVhostRouter)
// Init assets // Init assets
err = assets.Load(cfg.AssetsDir) err = assets.Load(cfg.AssetsDir)
if err != nil { if err != nil {
@@ -96,6 +115,9 @@ func NewService() (svr *Service, err error) {
return return
} }
// Init 404 not found page
vhost.NotFoundPagePath = cfg.Custom404Page
var ( var (
httpMuxOn bool httpMuxOn bool
httpsMuxOn bool httpsMuxOn bool
@@ -144,7 +166,7 @@ func NewService() (svr *Service, err error) {
if cfg.VhostHttpPort > 0 { if cfg.VhostHttpPort > 0 {
rp := vhost.NewHttpReverseProxy(vhost.HttpReverseProxyOptions{ rp := vhost.NewHttpReverseProxy(vhost.HttpReverseProxyOptions{
ResponseHeaderTimeoutS: cfg.VhostHttpTimeout, ResponseHeaderTimeoutS: cfg.VhostHttpTimeout,
}) }, svr.httpVhostRouter)
svr.rc.HttpReverseProxy = rp svr.rc.HttpReverseProxy = rp
address := fmt.Sprintf("%s:%d", cfg.ProxyBindAddr, cfg.VhostHttpPort) address := fmt.Sprintf("%s:%d", cfg.ProxyBindAddr, cfg.VhostHttpPort)
@@ -187,6 +209,12 @@ func NewService() (svr *Service, err error) {
log.Info("https service listen on %s:%d", cfg.ProxyBindAddr, cfg.VhostHttpsPort) log.Info("https service listen on %s:%d", cfg.ProxyBindAddr, cfg.VhostHttpsPort)
} }
// frp tls listener
tlsListener := svr.muxer.Listen(1, 1, func(data []byte) bool {
return int(data[0]) == frpNet.FRP_TLS_HEAD_BYTE
})
svr.tlsListener = frpNet.WrapLogListener(tlsListener)
// Create nat hole controller. // Create nat hole controller.
if cfg.BindUdpPort > 0 { if cfg.BindUdpPort > 0 {
var nc *nathole.NatHoleController var nc *nathole.NatHoleController
@@ -225,6 +253,7 @@ func (svr *Service) Run() {
} }
go svr.HandleListener(svr.websocketListener) go svr.HandleListener(svr.websocketListener)
go svr.HandleListener(svr.tlsListener)
svr.HandleListener(svr.listener) svr.HandleListener(svr.listener)
} }
@@ -238,6 +267,16 @@ func (svr *Service) HandleListener(l frpNet.Listener) {
return return
} }
log.Trace("start check TLS connection...")
originConn := c
c, err = frpNet.CheckAndEnableTLSServerConnWithTimeout(c, svr.tlsConfig, connReadTimeout)
if err != nil {
log.Warn("CheckAndEnableTLSServerConnWithTimeout error: %v", err)
originConn.Close()
continue
}
log.Trace("success check TLS connection")
// Start a new goroutine for dealing connections. // Start a new goroutine for dealing connections.
go func(frpConn frpNet.Conn) { go func(frpConn frpNet.Conn) {
dealFn := func(conn frpNet.Conn) { dealFn := func(conn frpNet.Conn) {
@@ -373,3 +412,24 @@ func (svr *Service) RegisterVisitorConn(visitorConn frpNet.Conn, newMsg *msg.New
return svr.rc.VisitorManager.NewConn(newMsg.ProxyName, visitorConn, newMsg.Timestamp, newMsg.SignKey, return svr.rc.VisitorManager.NewConn(newMsg.ProxyName, visitorConn, newMsg.Timestamp, newMsg.SignKey,
newMsg.UseEncryption, newMsg.UseCompression) newMsg.UseEncryption, newMsg.UseCompression)
} }
// Setup a bare-bones TLS config for the server
func generateTLSConfig() *tls.Config {
key, err := rsa.GenerateKey(rand.Reader, 1024)
if err != nil {
panic(err)
}
template := x509.Certificate{SerialNumber: big.NewInt(1)}
certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
if err != nil {
panic(err)
}
keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})
tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
if err != nil {
panic(err)
}
return &tls.Config{Certificates: []tls.Certificate{tlsCert}}
}

View File

@@ -127,6 +127,12 @@ custom_domains = test6.frp.com
host_header_rewrite = test6.frp.com host_header_rewrite = test6.frp.com
header_X-From-Where = frp header_X-From-Where = frp
[wildcard_http]
type = http
local_ip = 127.0.0.1
local_port = 10704
custom_domains = *.frp1.com
[subhost01] [subhost01]
type = http type = http
local_ip = 127.0.0.1 local_ip = 127.0.0.1

View File

@@ -19,7 +19,7 @@ func TestCmdTcp(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer s.Stop() defer s.Stop()
} }
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
c := util.NewProcess(consts.FRPC_BIN_PATH, []string{"tcp", "-s", "127.0.0.1:20000", "-t", "123", "-u", "test", c := util.NewProcess(consts.FRPC_BIN_PATH, []string{"tcp", "-s", "127.0.0.1:20000", "-t", "123", "-u", "test",
"-l", "10701", "-r", "20801", "-n", "tcp_test"}) "-l", "10701", "-r", "20801", "-n", "tcp_test"})
@@ -27,7 +27,7 @@ func TestCmdTcp(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer c.Stop() defer c.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
assert.NoError(err) assert.NoError(err)
@@ -43,7 +43,7 @@ func TestCmdUdp(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer s.Stop() defer s.Stop()
} }
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
c := util.NewProcess(consts.FRPC_BIN_PATH, []string{"udp", "-s", "127.0.0.1:20000", "-t", "123", "-u", "test", c := util.NewProcess(consts.FRPC_BIN_PATH, []string{"udp", "-s", "127.0.0.1:20000", "-t", "123", "-u", "test",
"-l", "10702", "-r", "20802", "-n", "udp_test"}) "-l", "10702", "-r", "20802", "-n", "udp_test"})
@@ -51,7 +51,7 @@ func TestCmdUdp(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer c.Stop() defer c.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
res, err := util.SendUdpMsg("127.0.0.1:20802", consts.TEST_UDP_ECHO_STR) res, err := util.SendUdpMsg("127.0.0.1:20802", consts.TEST_UDP_ECHO_STR)
assert.NoError(err) assert.NoError(err)
@@ -67,7 +67,7 @@ func TestCmdHttp(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer s.Stop() defer s.Stop()
} }
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
c := util.NewProcess(consts.FRPC_BIN_PATH, []string{"http", "-s", "127.0.0.1:20000", "-t", "123", "-u", "test", c := util.NewProcess(consts.FRPC_BIN_PATH, []string{"http", "-s", "127.0.0.1:20000", "-t", "123", "-u", "test",
"-n", "udp_test", "-l", "10704", "--custom_domain", "127.0.0.1"}) "-n", "udp_test", "-l", "10704", "--custom_domain", "127.0.0.1"})
@@ -75,7 +75,7 @@ func TestCmdHttp(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer c.Stop() defer c.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
code, body, _, err := util.SendHttpMsg("GET", "http://127.0.0.1:20001", "", nil, "") code, body, _, err := util.SendHttpMsg("GET", "http://127.0.0.1:20001", "", nil, "")
if assert.NoError(err) { if assert.NoError(err) {

View File

@@ -72,7 +72,7 @@ health_check_url = /health
func TestHealthCheck(t *testing.T) { func TestHealthCheck(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
// ****** start backgroud services ****** // ****** start background services ******
echoSvc1 := mock.NewEchoServer(15001, 1, "echo1") echoSvc1 := mock.NewEchoServer(15001, 1, "echo1")
err := echoSvc1.Start() err := echoSvc1.Start()
if assert.NoError(err) { if assert.NoError(err) {

View File

@@ -182,6 +182,21 @@ func TestHttp(t *testing.T) {
assert.Equal("true", header.Get("X-Header-Set")) assert.Equal("true", header.Get("X-Header-Set"))
} }
// wildcard_http
// test.frp1.com match *.frp1.com
code, body, _, err = util.SendHttpMsg("GET", fmt.Sprintf("http://127.0.0.1:%d", consts.TEST_HTTP_FRP_PORT), "test.frp1.com", nil, "")
if assert.NoError(err) {
assert.Equal(200, code)
assert.Equal(consts.TEST_HTTP_NORMAL_STR, body)
}
// new.test.frp1.com also match *.frp1.com
code, body, _, err = util.SendHttpMsg("GET", fmt.Sprintf("http://127.0.0.1:%d", consts.TEST_HTTP_FRP_PORT), "new.test.frp1.com", nil, "")
if assert.NoError(err) {
assert.Equal(200, code)
assert.Equal(consts.TEST_HTTP_NORMAL_STR, body)
}
// subhost01 // subhost01
code, body, _, err = util.SendHttpMsg("GET", fmt.Sprintf("http://127.0.0.1:%d", consts.TEST_HTTP_FRP_PORT), "test01.sub.com", nil, "") code, body, _, err = util.SendHttpMsg("GET", fmt.Sprintf("http://127.0.0.1:%d", consts.TEST_HTTP_FRP_PORT), "test01.sub.com", nil, "")
if assert.NoError(err) { if assert.NoError(err) {

View File

@@ -56,14 +56,14 @@ func TestReconnect(t *testing.T) {
defer frpsProcess.Stop() defer frpsProcess.Stop()
} }
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath}) frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath})
err = frpcProcess.Start() err = frpcProcess.Start()
if assert.NoError(err) { if assert.NoError(err) {
defer frpcProcess.Stop() defer frpcProcess.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
// test tcp // test tcp
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
@@ -72,7 +72,7 @@ func TestReconnect(t *testing.T) {
// stop frpc // stop frpc
frpcProcess.Stop() frpcProcess.Stop()
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
// test tcp, expect failed // test tcp, expect failed
_, err = util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) _, err = util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
@@ -84,7 +84,7 @@ func TestReconnect(t *testing.T) {
if assert.NoError(err) { if assert.NoError(err) {
defer newFrpcProcess.Stop() defer newFrpcProcess.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
// test tcp // test tcp
res, err = util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) res, err = util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
@@ -93,7 +93,7 @@ func TestReconnect(t *testing.T) {
// stop frps // stop frps
frpsProcess.Stop() frpsProcess.Stop()
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
// test tcp, expect failed // test tcp, expect failed
_, err = util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) _, err = util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)

View File

@@ -94,7 +94,7 @@ func TestReload(t *testing.T) {
defer frpsProcess.Stop() defer frpsProcess.Stop()
} }
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath}) frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath})
err = frpcProcess.Start() err = frpcProcess.Start()
@@ -102,7 +102,7 @@ func TestReload(t *testing.T) {
defer frpcProcess.Stop() defer frpcProcess.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
// test tcp1 // test tcp1
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)

View File

@@ -55,7 +55,7 @@ func TestConfTemplate(t *testing.T) {
defer frpsProcess.Stop() defer frpsProcess.Stop()
} }
time.Sleep(100 * time.Millisecond) time.Sleep(200 * time.Millisecond)
frpcProcess := util.NewProcess("env", []string{"FRP_TOKEN=123456", "TCP_REMOTE_PORT=20801", consts.FRPC_BIN_PATH, "-c", frpcCfgPath}) frpcProcess := util.NewProcess("env", []string{"FRP_TOKEN=123456", "TCP_REMOTE_PORT=20801", consts.FRPC_BIN_PATH, "-c", frpcCfgPath})
err = frpcProcess.Start() err = frpcProcess.Start()
@@ -63,7 +63,7 @@ func TestConfTemplate(t *testing.T) {
defer frpcProcess.Stop() defer frpcProcess.Stop()
} }
time.Sleep(250 * time.Millisecond) time.Sleep(500 * time.Millisecond)
// test tcp1 // test tcp1
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR) res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)

188
tests/ci/tls_test.go Normal file
View File

@@ -0,0 +1,188 @@
package ci
import (
"os"
"testing"
"time"
"github.com/fatedier/frp/tests/config"
"github.com/fatedier/frp/tests/consts"
"github.com/fatedier/frp/tests/util"
"github.com/stretchr/testify/assert"
)
const FRPS_TLS_TCP_CONF = `
[common]
bind_addr = 0.0.0.0
bind_port = 20000
log_file = console
log_level = debug
token = 123456
`
const FRPC_TLS_TCP_CONF = `
[common]
server_addr = 127.0.0.1
server_port = 20000
log_file = console
log_level = debug
token = 123456
protocol = tcp
tls_enable = true
[tcp]
type = tcp
local_port = 10701
remote_port = 20801
`
func TestTlsOverTCP(t *testing.T) {
assert := assert.New(t)
frpsCfgPath, err := config.GenerateConfigFile(consts.FRPS_NORMAL_CONFIG, FRPS_TLS_TCP_CONF)
if assert.NoError(err) {
defer os.Remove(frpsCfgPath)
}
frpcCfgPath, err := config.GenerateConfigFile(consts.FRPC_NORMAL_CONFIG, FRPC_TLS_TCP_CONF)
if assert.NoError(err) {
defer os.Remove(frpcCfgPath)
}
frpsProcess := util.NewProcess(consts.FRPS_BIN_PATH, []string{"-c", frpsCfgPath})
err = frpsProcess.Start()
if assert.NoError(err) {
defer frpsProcess.Stop()
}
time.Sleep(200 * time.Millisecond)
frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath})
err = frpcProcess.Start()
if assert.NoError(err) {
defer frpcProcess.Stop()
}
time.Sleep(500 * time.Millisecond)
// test tcp
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
assert.NoError(err)
assert.Equal(consts.TEST_TCP_ECHO_STR, res)
}
const FRPS_TLS_KCP_CONF = `
[common]
bind_addr = 0.0.0.0
bind_port = 20000
kcp_bind_port = 20000
log_file = console
log_level = debug
token = 123456
`
const FRPC_TLS_KCP_CONF = `
[common]
server_addr = 127.0.0.1
server_port = 20000
log_file = console
log_level = debug
token = 123456
protocol = kcp
tls_enable = true
[tcp]
type = tcp
local_port = 10701
remote_port = 20801
`
func TestTLSOverKCP(t *testing.T) {
assert := assert.New(t)
frpsCfgPath, err := config.GenerateConfigFile(consts.FRPS_NORMAL_CONFIG, FRPS_TLS_KCP_CONF)
if assert.NoError(err) {
defer os.Remove(frpsCfgPath)
}
frpcCfgPath, err := config.GenerateConfigFile(consts.FRPC_NORMAL_CONFIG, FRPC_TLS_KCP_CONF)
if assert.NoError(err) {
defer os.Remove(frpcCfgPath)
}
frpsProcess := util.NewProcess(consts.FRPS_BIN_PATH, []string{"-c", frpsCfgPath})
err = frpsProcess.Start()
if assert.NoError(err) {
defer frpsProcess.Stop()
}
time.Sleep(200 * time.Millisecond)
frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath})
err = frpcProcess.Start()
if assert.NoError(err) {
defer frpcProcess.Stop()
}
time.Sleep(500 * time.Millisecond)
// test tcp
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
assert.NoError(err)
assert.Equal(consts.TEST_TCP_ECHO_STR, res)
}
const FRPS_TLS_WS_CONF = `
[common]
bind_addr = 0.0.0.0
bind_port = 20000
log_file = console
log_level = debug
token = 123456
`
const FRPC_TLS_WS_CONF = `
[common]
server_addr = 127.0.0.1
server_port = 20000
log_file = console
log_level = debug
token = 123456
protocol = websocket
tls_enable = true
[tcp]
type = tcp
local_port = 10701
remote_port = 20801
`
func TestTLSOverWebsocket(t *testing.T) {
assert := assert.New(t)
frpsCfgPath, err := config.GenerateConfigFile(consts.FRPS_NORMAL_CONFIG, FRPS_TLS_WS_CONF)
if assert.NoError(err) {
defer os.Remove(frpsCfgPath)
}
frpcCfgPath, err := config.GenerateConfigFile(consts.FRPC_NORMAL_CONFIG, FRPC_TLS_WS_CONF)
if assert.NoError(err) {
defer os.Remove(frpcCfgPath)
}
frpsProcess := util.NewProcess(consts.FRPS_BIN_PATH, []string{"-c", frpsCfgPath})
err = frpsProcess.Start()
if assert.NoError(err) {
defer frpsProcess.Stop()
}
time.Sleep(200 * time.Millisecond)
frpcProcess := util.NewProcess(consts.FRPC_BIN_PATH, []string{"-c", frpcCfgPath})
err = frpcProcess.Start()
if assert.NoError(err) {
defer frpcProcess.Stop()
}
time.Sleep(500 * time.Millisecond)
// test tcp
res, err := util.SendTcpMsg("127.0.0.1:20801", consts.TEST_TCP_ECHO_STR)
assert.NoError(err)
assert.Equal(consts.TEST_TCP_ECHO_STR, res)
}

View File

@@ -88,8 +88,10 @@ func handleHttp(w http.ResponseWriter, r *http.Request) {
return return
} }
if strings.Contains(r.Host, "127.0.0.1") || strings.Contains(r.Host, "test2.frp.com") || if strings.HasPrefix(r.Host, "127.0.0.1") || strings.HasPrefix(r.Host, "test2.frp.com") ||
strings.Contains(r.Host, "test5.frp.com") || strings.Contains(r.Host, "test6.frp.com") { strings.HasPrefix(r.Host, "test5.frp.com") || strings.HasPrefix(r.Host, "test6.frp.com") ||
strings.HasPrefix(r.Host, "test.frp1.com") || strings.HasPrefix(r.Host, "new.test.frp1.com") {
w.WriteHeader(200) w.WriteHeader(200)
w.Write([]byte(consts.TEST_HTTP_NORMAL_STR)) w.Write([]byte(consts.TEST_HTTP_NORMAL_STR))
} else if strings.Contains(r.Host, "test3.frp.com") { } else if strings.Contains(r.Host, "test3.frp.com") {

View File

@@ -28,51 +28,51 @@ func GetProxyStatus(statusAddr string, user string, passwd string, name string)
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return status, err return status, err
} else { }
if resp.StatusCode != 200 { defer resp.Body.Close()
return status, fmt.Errorf("admin api status code [%d]", resp.StatusCode) if resp.StatusCode != 200 {
} return status, fmt.Errorf("admin api status code [%d]", resp.StatusCode)
defer resp.Body.Close() }
body, err := ioutil.ReadAll(resp.Body) body, err := ioutil.ReadAll(resp.Body)
if err != nil { if err != nil {
return status, err return status, err
} }
allStatus := &client.StatusResp{} allStatus := &client.StatusResp{}
err = json.Unmarshal(body, &allStatus) err = json.Unmarshal(body, &allStatus)
if err != nil { if err != nil {
return status, fmt.Errorf("unmarshal http response error: %s", strings.TrimSpace(string(body))) return status, fmt.Errorf("unmarshal http response error: %s", strings.TrimSpace(string(body)))
} }
for _, s := range allStatus.Tcp { for _, s := range allStatus.Tcp {
if s.Name == name { if s.Name == name {
return &s, nil return &s, nil
}
}
for _, s := range allStatus.Udp {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Http {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Https {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Stcp {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Xtcp {
if s.Name == name {
return &s, nil
}
} }
} }
for _, s := range allStatus.Udp {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Http {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Https {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Stcp {
if s.Name == name {
return &s, nil
}
}
for _, s := range allStatus.Xtcp {
if s.Name == name {
return &s, nil
}
}
return status, errors.New("no proxy status found") return status, errors.New("no proxy status found")
} }
@@ -87,13 +87,13 @@ func ReloadConf(reloadAddr string, user string, passwd string) error {
resp, err := http.DefaultClient.Do(req) resp, err := http.DefaultClient.Do(req)
if err != nil { if err != nil {
return err return err
} else {
if resp.StatusCode != 200 {
return fmt.Errorf("admin api status code [%d]", resp.StatusCode)
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
} }
defer resp.Body.Close()
if resp.StatusCode != 200 {
return fmt.Errorf("admin api status code [%d]", resp.StatusCode)
}
io.Copy(ioutil.Discard, resp.Body)
return nil return nil
} }

View File

@@ -15,6 +15,7 @@
package net package net
import ( import (
"crypto/tls"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@@ -207,3 +208,17 @@ func ConnectServerByProxy(proxyUrl string, protocol string, addr string) (c Conn
return nil, fmt.Errorf("unsupport protocol: %s", protocol) return nil, fmt.Errorf("unsupport protocol: %s", protocol)
} }
} }
func ConnectServerByProxyWithTLS(proxyUrl string, protocol string, addr string, tlsConfig *tls.Config) (c Conn, err error) {
c, err = ConnectServerByProxy(proxyUrl, protocol, addr)
if err != nil {
return
}
if tlsConfig == nil {
return
}
c = WrapTLSClientConn(c, tlsConfig)
return
}

52
utils/net/tls.go Normal file
View File

@@ -0,0 +1,52 @@
// Copyright 2019 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package net
import (
"crypto/tls"
"net"
"time"
gnet "github.com/fatedier/golib/net"
)
var (
FRP_TLS_HEAD_BYTE = 0x17
)
func WrapTLSClientConn(c net.Conn, tlsConfig *tls.Config) (out Conn) {
c.Write([]byte{byte(FRP_TLS_HEAD_BYTE)})
out = WrapConn(tls.Client(c, tlsConfig))
return
}
func CheckAndEnableTLSServerConnWithTimeout(c net.Conn, tlsConfig *tls.Config, timeout time.Duration) (out Conn, err error) {
sc, r := gnet.NewSharedConnSize(c, 2)
buf := make([]byte, 1)
var n int
c.SetReadDeadline(time.Now().Add(timeout))
n, err = r.Read(buf)
c.SetReadDeadline(time.Time{})
if err != nil {
return
}
if n == 1 && int(buf[0]) == FRP_TLS_HEAD_BYTE {
out = WrapConn(tls.Server(c, tlsConfig))
} else {
out = WrapConn(sc)
}
return
}

View File

@@ -19,7 +19,7 @@ import (
"strings" "strings"
) )
var version string = "0.24.0" var version string = "0.28.2"
func Full() string { func Full() string {
return version return version

View File

@@ -1,4 +1,4 @@
// Copyright 2016 fatedier, fatedier@gmail.com // Copyright 2017 fatedier, fatedier@gmail.com
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -15,221 +15,202 @@
package vhost package vhost
import ( import (
"bufio"
"bytes" "bytes"
"encoding/base64" "context"
"errors"
"fmt" "fmt"
"io" "log"
"net"
"net/http" "net/http"
"net/url"
"strings" "strings"
"time" "time"
frpNet "github.com/fatedier/frp/utils/net" frpLog "github.com/fatedier/frp/utils/log"
gnet "github.com/fatedier/golib/net"
"github.com/fatedier/golib/pool" "github.com/fatedier/golib/pool"
) )
type HttpMuxer struct { var (
*VhostMuxer ErrNoDomain = errors.New("no such domain")
)
func getHostFromAddr(addr string) (host string) {
strs := strings.Split(addr, ":")
if len(strs) > 1 {
host = strs[0]
} else {
host = addr
}
return
} }
func GetHttpRequestInfo(c frpNet.Conn) (_ frpNet.Conn, _ map[string]string, err error) { type HttpReverseProxyOptions struct {
reqInfoMap := make(map[string]string, 0) ResponseHeaderTimeoutS int64
sc, rd := gnet.NewSharedConn(c)
request, err := http.ReadRequest(bufio.NewReader(rd))
if err != nil {
return nil, reqInfoMap, err
}
// hostName
tmpArr := strings.Split(request.Host, ":")
reqInfoMap["Host"] = tmpArr[0]
reqInfoMap["Path"] = request.URL.Path
reqInfoMap["Scheme"] = request.URL.Scheme
// Authorization
authStr := request.Header.Get("Authorization")
if authStr != "" {
reqInfoMap["Authorization"] = authStr
}
request.Body.Close()
return frpNet.WrapConn(sc), reqInfoMap, nil
} }
func NewHttpMuxer(listener frpNet.Listener, timeout time.Duration) (*HttpMuxer, error) { type HttpReverseProxy struct {
mux, err := NewVhostMuxer(listener, GetHttpRequestInfo, HttpAuthFunc, ModifyHttpRequest, timeout) proxy *ReverseProxy
return &HttpMuxer{mux}, err vhostRouter *VhostRouters
responseHeaderTimeout time.Duration
} }
func ModifyHttpRequest(c frpNet.Conn, rewriteHost string) (_ frpNet.Conn, err error) { func NewHttpReverseProxy(option HttpReverseProxyOptions, vhostRouter *VhostRouters) *HttpReverseProxy {
sc, rd := gnet.NewSharedConn(c) if option.ResponseHeaderTimeoutS <= 0 {
var buff []byte option.ResponseHeaderTimeoutS = 60
remoteIP := strings.Split(c.RemoteAddr().String(), ":")[0]
if buff, err = hostNameRewrite(rd, rewriteHost, remoteIP); err != nil {
return nil, err
} }
err = sc.ResetBuf(buff) rp := &HttpReverseProxy{
return frpNet.WrapConn(sc), err responseHeaderTimeout: time.Duration(option.ResponseHeaderTimeoutS) * time.Second,
} vhostRouter: vhostRouter,
func hostNameRewrite(request io.Reader, rewriteHost string, remoteIP string) (_ []byte, err error) {
buf := pool.GetBuf(1024)
defer pool.PutBuf(buf)
var n int
n, err = request.Read(buf)
if err != nil {
return
} }
retBuffer, err := parseRequest(buf[:n], rewriteHost, remoteIP) proxy := &ReverseProxy{
return retBuffer, err Director: func(req *http.Request) {
} req.URL.Scheme = "http"
url := req.Context().Value("url").(string)
func parseRequest(org []byte, rewriteHost string, remoteIP string) (ret []byte, err error) { oldHost := getHostFromAddr(req.Context().Value("host").(string))
tp := bytes.NewBuffer(org) host := rp.GetRealHost(oldHost, url)
// First line: GET /index.html HTTP/1.0 if host != "" {
var b []byte req.Host = host
if b, err = tp.ReadBytes('\n'); err != nil {
return nil, err
}
req := new(http.Request)
// we invoked ReadRequest in GetHttpHostname before, so we ignore error
req.Method, req.RequestURI, req.Proto, _ = parseRequestLine(string(b))
rawurl := req.RequestURI
// CONNECT www.google.com:443 HTTP/1.1
justAuthority := req.Method == "CONNECT" && !strings.HasPrefix(rawurl, "/")
if justAuthority {
rawurl = "http://" + rawurl
}
req.URL, _ = url.ParseRequestURI(rawurl)
if justAuthority {
// Strip the bogus "http://" back off.
req.URL.Scheme = ""
}
// RFC2616: first case
// GET /index.html HTTP/1.1
// Host: www.google.com
if req.URL.Host == "" {
var changedBuf []byte
if rewriteHost != "" {
changedBuf, err = changeHostName(tp, rewriteHost)
}
buf := new(bytes.Buffer)
buf.Write(b)
buf.WriteString(fmt.Sprintf("X-Forwarded-For: %s\r\n", remoteIP))
buf.WriteString(fmt.Sprintf("X-Real-IP: %s\r\n", remoteIP))
if len(changedBuf) == 0 {
tp.WriteTo(buf)
} else {
buf.Write(changedBuf)
}
return buf.Bytes(), err
}
// RFC2616: second case
// GET http://www.google.com/index.html HTTP/1.1
// Host: doesntmatter
// In this case, any Host line is ignored.
if rewriteHost != "" {
hostPort := strings.Split(req.URL.Host, ":")
if len(hostPort) == 1 {
req.URL.Host = rewriteHost
} else if len(hostPort) == 2 {
req.URL.Host = fmt.Sprintf("%s:%s", rewriteHost, hostPort[1])
}
}
firstLine := req.Method + " " + req.URL.String() + " " + req.Proto
buf := new(bytes.Buffer)
buf.WriteString(firstLine)
buf.WriteString(fmt.Sprintf("X-Forwarded-For: %s\r\n", remoteIP))
buf.WriteString(fmt.Sprintf("X-Real-IP: %s\r\n", remoteIP))
tp.WriteTo(buf)
return buf.Bytes(), err
}
// parseRequestLine parses "GET /foo HTTP/1.1" into its three parts.
func parseRequestLine(line string) (method, requestURI, proto string, ok bool) {
s1 := strings.Index(line, " ")
s2 := strings.Index(line[s1+1:], " ")
if s1 < 0 || s2 < 0 {
return
}
s2 += s1 + 1
return line[:s1], line[s1+1 : s2], line[s2+1:], true
}
func changeHostName(buff *bytes.Buffer, rewriteHost string) (_ []byte, err error) {
retBuf := new(bytes.Buffer)
peek := buff.Bytes()
for len(peek) > 0 {
i := bytes.IndexByte(peek, '\n')
if i < 3 {
// Not present (-1) or found within the next few bytes,
// implying we're at the end ("\r\n\r\n" or "\n\n")
return nil, err
}
kv := peek[:i]
j := bytes.IndexByte(kv, ':')
if j < 0 {
return nil, fmt.Errorf("malformed MIME header line: " + string(kv))
}
if strings.Contains(strings.ToLower(string(kv[:j])), "host") {
var hostHeader string
portPos := bytes.IndexByte(kv[j+1:], ':')
if portPos == -1 {
hostHeader = fmt.Sprintf("Host: %s\r\n", rewriteHost)
} else {
hostHeader = fmt.Sprintf("Host: %s:%s\r\n", rewriteHost, kv[j+portPos+2:])
} }
retBuf.WriteString(hostHeader) req.URL.Host = req.Host
peek = peek[i+1:]
break headers := rp.GetHeaders(oldHost, url)
} else { for k, v := range headers {
retBuf.Write(peek[:i]) req.Header.Set(k, v)
retBuf.WriteByte('\n') }
},
Transport: &http.Transport{
ResponseHeaderTimeout: rp.responseHeaderTimeout,
DisableKeepAlives: true,
DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
url := ctx.Value("url").(string)
host := getHostFromAddr(ctx.Value("host").(string))
remote := ctx.Value("remote").(string)
return rp.CreateConnection(host, url, remote)
},
},
BufferPool: newWrapPool(),
ErrorLog: log.New(newWrapLogger(), "", 0),
ErrorHandler: func(rw http.ResponseWriter, req *http.Request, err error) {
frpLog.Warn("do http proxy request error: %v", err)
rw.WriteHeader(http.StatusNotFound)
rw.Write(getNotFoundPageContent())
},
}
rp.proxy = proxy
return rp
}
// Register registers the route config to the reverse proxy.
// The reverse proxy will use CreateConnFn from routeCfg to create a
// connection to the remote service when a request matches this route.
// It returns the router's error (ErrRouterConfigConflict) when a route
// with the same domain and location already exists.
func (rp *HttpReverseProxy) Register(routeCfg VhostRouteConfig) error {
	// Add already reports conflicts, so its result can be returned
	// directly instead of the redundant `if err != nil { return err };
	// return nil` dance.
	return rp.vhostRouter.Add(routeCfg.Domain, routeCfg.Location, &routeCfg)
}
// UnRegister unregister route config by domain and location.
// Deleting a route that does not exist is delegated to the router
// (no error is reported to the caller either way).
func (rp *HttpReverseProxy) UnRegister(domain string, location string) {
	rp.vhostRouter.Del(domain, location)
}
// GetRealHost returns the rewrite host configured for the route that
// matches domain and location, or "" when no such route exists (or the
// route has no rewrite host set).
func (rp *HttpReverseProxy) GetRealHost(domain string, location string) (host string) {
	vr, found := rp.getVhost(domain, location)
	if !found {
		return ""
	}
	return vr.payload.(*VhostRouteConfig).RewriteHost
}
// GetHeaders returns the extra headers configured for the route that
// matches domain and location; nil when no such route exists.
func (rp *HttpReverseProxy) GetHeaders(domain string, location string) (headers map[string]string) {
	vr, found := rp.getVhost(domain, location)
	if !found {
		return nil
	}
	return vr.payload.(*VhostRouteConfig).Headers
}
// CreateConnection create a new connection by route config.
// It looks up the route for domain/location and invokes its
// CreateConnFn with remoteAddr; when no route (or no dial function) is
// registered it returns an error wrapping ErrNoDomain.
func (rp *HttpReverseProxy) CreateConnection(domain string, location string, remoteAddr string) (net.Conn, error) {
	if vr, found := rp.getVhost(domain, location); found {
		if dial := vr.payload.(*VhostRouteConfig).CreateConnFn; dial != nil {
			return dial(remoteAddr)
		}
	}
	return nil, fmt.Errorf("%v: %s %s", ErrNoDomain, domain, location)
}
// CheckAuth validates HTTP basic-auth credentials against the route
// matching domain and location. Access is granted when no route
// matches, when the route has no username and no password configured,
// or when both credentials match exactly.
func (rp *HttpReverseProxy) CheckAuth(domain, location, user, passwd string) bool {
	vr, found := rp.getVhost(domain, location)
	if !found {
		return true
	}
	cfg := vr.payload.(*VhostRouteConfig)
	if cfg.Username == "" && cfg.Password == "" {
		// No credentials configured for this route: open access.
		return true
	}
	return cfg.Username == user && cfg.Password == passwd
}
// getVhost get vhost router by domain and location
func (rp *HttpReverseProxy) getVhost(domain string, location string) (vr *VhostRouter, ok bool) {
// first we check the full hostname
// if not exist, then check the wildcard_domain such as *.example.com
vr, ok = rp.vhostRouter.Get(domain, location)
if ok {
return
}
domainSplit := strings.Split(domain, ".")
if len(domainSplit) < 3 {
return nil, false
}
for {
if len(domainSplit) < 3 {
return nil, false
} }
peek = peek[i+1:] domainSplit[0] = "*"
domain = strings.Join(domainSplit, ".")
vr, ok = rp.vhostRouter.Get(domain, location)
if ok {
return vr, true
}
domainSplit = domainSplit[1:]
} }
retBuf.Write(peek) return
return retBuf.Bytes(), err
} }
func HttpAuthFunc(c frpNet.Conn, userName, passWord, authorization string) (bAccess bool, err error) { func (rp *HttpReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
s := strings.SplitN(authorization, " ", 2) domain := getHostFromAddr(req.Host)
if len(s) != 2 { location := req.URL.Path
res := noAuthResponse() user, passwd, _ := req.BasicAuth()
res.Write(c) if !rp.CheckAuth(domain, location, user, passwd) {
rw.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
return return
} }
b, err := base64.StdEncoding.DecodeString(s[1]) rp.proxy.ServeHTTP(rw, req)
if err != nil {
return
}
pair := strings.SplitN(string(b), ":", 2)
if len(pair) != 2 {
return
}
if pair[0] != userName || pair[1] != passWord {
return
}
return true, nil
} }
func noAuthResponse() *http.Response { type wrapPool struct{}
header := make(map[string][]string)
header["WWW-Authenticate"] = []string{`Basic realm="Restricted"`} func newWrapPool() *wrapPool { return &wrapPool{} }
res := &http.Response{
Status: "401 Not authorized", func (p *wrapPool) Get() []byte { return pool.GetBuf(32 * 1024) }
StatusCode: 401,
Proto: "HTTP/1.1", func (p *wrapPool) Put(buf []byte) { pool.PutBuf(buf) }
ProtoMajor: 1,
ProtoMinor: 1, type wrapLogger struct{}
Header: header,
} func newWrapLogger() *wrapLogger { return &wrapLogger{} }
return res
func (l *wrapLogger) Write(p []byte) (n int, err error) {
frpLog.Warn("%s", string(bytes.TrimRight(p, "\n")))
return len(p), nil
} }

View File

@@ -1,211 +0,0 @@
// Copyright 2017 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vhost
import (
"bytes"
"context"
"errors"
"log"
"net"
"net/http"
"strings"
"sync"
"time"
frpLog "github.com/fatedier/frp/utils/log"
"github.com/fatedier/golib/pool"
)
var (
ErrRouterConfigConflict = errors.New("router config conflict")
ErrNoDomain = errors.New("no such domain")
)
// getHostFromAddr strips the port from an address of the form
// "host:port"; an addr without a colon is returned unchanged.
// NOTE(review): a bare IPv6 literal is truncated at its first colon —
// same behavior as the original Split-based version.
func getHostFromAddr(addr string) (host string) {
	if i := strings.Index(addr, ":"); i >= 0 {
		return addr[:i]
	}
	return addr
}
type HttpReverseProxyOptions struct {
ResponseHeaderTimeoutS int64
}
type HttpReverseProxy struct {
proxy *ReverseProxy
vhostRouter *VhostRouters
responseHeaderTimeout time.Duration
cfgMu sync.RWMutex
}
// NewHttpReverseProxy builds an HttpReverseProxy wired to a ReverseProxy
// whose Director, Transport and WebSocket dialer all route through the
// vhost router. A non-positive ResponseHeaderTimeoutS falls back to 60
// seconds.
func NewHttpReverseProxy(option HttpReverseProxyOptions) *HttpReverseProxy {
	if option.ResponseHeaderTimeoutS <= 0 {
		option.ResponseHeaderTimeoutS = 60
	}
	rp := &HttpReverseProxy{
		responseHeaderTimeout: time.Duration(option.ResponseHeaderTimeoutS) * time.Second,
		vhostRouter:           NewVhostRouters(),
	}
	proxy := &ReverseProxy{
		// Director rewrites the outbound request: it reads the "url"
		// and "host" context values (set by the proxy before dialing),
		// swaps in the configured rewrite host when one exists, and
		// applies any extra headers configured for the route.
		Director: func(req *http.Request) {
			req.URL.Scheme = "http"
			url := req.Context().Value("url").(string)
			oldHost := getHostFromAddr(req.Context().Value("host").(string))
			host := rp.GetRealHost(oldHost, url)
			if host != "" {
				req.Host = host
			}
			req.URL.Host = req.Host
			headers := rp.GetHeaders(oldHost, url)
			for k, v := range headers {
				req.Header.Set(k, v)
			}
		},
		// Transport dials the backend via the route's CreateConnFn,
		// again keyed by the "url"/"host" context values.
		Transport: &http.Transport{
			ResponseHeaderTimeout: rp.responseHeaderTimeout,
			DisableKeepAlives:     true,
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				url := ctx.Value("url").(string)
				host := getHostFromAddr(ctx.Value("host").(string))
				return rp.CreateConnection(host, url)
			},
		},
		// WebSocket upgrades bypass Transport and dial the same way.
		WebSocketDialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
			url := ctx.Value("url").(string)
			host := getHostFromAddr(ctx.Value("host").(string))
			return rp.CreateConnection(host, url)
		},
		BufferPool: newWrapPool(),
		ErrorLog:   log.New(newWrapLogger(), "", 0),
	}
	rp.proxy = proxy
	return rp
}
// Register adds routeCfg to the vhost router under cfgMu.
// It returns ErrRouterConfigConflict when a route with the same domain
// and location is already registered.
func (rp *HttpReverseProxy) Register(routeCfg VhostRouteConfig) error {
	rp.cfgMu.Lock()
	defer rp.cfgMu.Unlock()
	// Reject duplicates instead of silently overwriting an existing
	// route; early return avoids the else-after-return branch.
	if _, ok := rp.vhostRouter.Exist(routeCfg.Domain, routeCfg.Location); ok {
		return ErrRouterConfigConflict
	}
	rp.vhostRouter.Add(routeCfg.Domain, routeCfg.Location, &routeCfg)
	return nil
}
// UnRegister removes the route registered for domain and location,
// holding cfgMu for the duration of the delete.
func (rp *HttpReverseProxy) UnRegister(domain string, location string) {
	rp.cfgMu.Lock()
	defer rp.cfgMu.Unlock()
	rp.vhostRouter.Del(domain, location)
}
// GetRealHost reports the rewrite host configured for the route that
// matches domain and location; "" when no route matches or none is set.
func (rp *HttpReverseProxy) GetRealHost(domain string, location string) (host string) {
	vr, found := rp.getVhost(domain, location)
	if !found {
		return ""
	}
	return vr.payload.(*VhostRouteConfig).RewriteHost
}
// GetHeaders reports the extra headers configured for the route that
// matches domain and location; nil when no route matches.
func (rp *HttpReverseProxy) GetHeaders(domain string, location string) (headers map[string]string) {
	vr, found := rp.getVhost(domain, location)
	if !found {
		return nil
	}
	return vr.payload.(*VhostRouteConfig).Headers
}
// CreateConnection dials the backend for the route matching domain and
// location via its CreateConnFn; when no route (or no dial function)
// exists it returns ErrNoDomain.
func (rp *HttpReverseProxy) CreateConnection(domain string, location string) (net.Conn, error) {
	if vr, found := rp.getVhost(domain, location); found {
		if dial := vr.payload.(*VhostRouteConfig).CreateConnFn; dial != nil {
			return dial()
		}
	}
	return nil, ErrNoDomain
}
// CheckAuth validates basic-auth credentials against the matching
// route. Access is allowed when no route matches, when the route has
// neither username nor password configured, or when both match.
func (rp *HttpReverseProxy) CheckAuth(domain, location, user, passwd string) bool {
	vr, found := rp.getVhost(domain, location)
	if !found {
		return true
	}
	cfg := vr.payload.(*VhostRouteConfig)
	if cfg.Username == "" && cfg.Password == "" {
		// Route is not protected.
		return true
	}
	return cfg.Username == user && cfg.Password == passwd
}
// getVhost looks up the route for domain/location under a read lock.
// The exact hostname is tried first; failing that, and only for domains
// with at least three labels, the leading label is replaced with "*"
// (e.g. "a.example.com" -> "*.example.com") and the wildcard entry is
// tried.
func (rp *HttpReverseProxy) getVhost(domain string, location string) (vr *VhostRouter, ok bool) {
	rp.cfgMu.RLock()
	defer rp.cfgMu.RUnlock()
	if vr, ok = rp.vhostRouter.Get(domain, location); ok {
		return vr, true
	}
	labels := strings.Split(domain, ".")
	if len(labels) < 3 {
		return vr, false
	}
	labels[0] = "*"
	return rp.vhostRouter.Get(strings.Join(labels, "."), location)
}
// ServeHTTP enforces HTTP basic auth for the matched route, then hands
// the request to the inner reverse proxy. Failed auth gets a 401 with a
// WWW-Authenticate challenge.
func (rp *HttpReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
	domain := getHostFromAddr(req.Host)
	location := req.URL.Path
	user, passwd, _ := req.BasicAuth()
	if rp.CheckAuth(domain, location, user, passwd) {
		rp.proxy.ServeHTTP(rw, req)
		return
	}
	rw.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`)
	http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
}
// wrapPool adapts frp's shared byte-buffer pool to the reverse proxy's
// BufferPool interface (Get/Put of []byte).
type wrapPool struct{}

func newWrapPool() *wrapPool { return &wrapPool{} }

// Get returns a 32 KiB buffer from the shared pool.
func (p *wrapPool) Get() []byte { return pool.GetBuf(32 * 1024) }

// Put returns buf to the shared pool for reuse.
func (p *wrapPool) Put(buf []byte) { pool.PutBuf(buf) }
// wrapLogger adapts frp's leveled logger to io.Writer so it can back a
// *log.Logger (used for the reverse proxy's ErrorLog).
type wrapLogger struct{}

func newWrapLogger() *wrapLogger { return &wrapLogger{} }

// Write logs p at warn level, trimming the trailing newline that the
// log package appends, and always reports the full length as written.
func (l *wrapLogger) Write(p []byte) (n int, err error) {
	frpLog.Warn("%s", string(bytes.TrimRight(p, "\n")))
	return len(p), nil
}

View File

@@ -15,13 +15,18 @@
package vhost package vhost
import ( import (
"bytes"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"strings"
frpLog "github.com/fatedier/frp/utils/log"
"github.com/fatedier/frp/utils/version" "github.com/fatedier/frp/utils/version"
) )
var (
NotFoundPagePath = ""
)
const ( const (
NotFound = `<!DOCTYPE html> NotFound = `<!DOCTYPE html>
<html> <html>
@@ -46,10 +51,28 @@ Please try again later.</p>
` `
) )
// getNotFoundPageContent returns the body served for unmatched routes:
// the file at NotFoundPagePath when set and readable, otherwise the
// built-in NotFound page. A read failure is logged and falls back to
// the built-in page.
func getNotFoundPageContent() []byte {
	if NotFoundPagePath == "" {
		return []byte(NotFound)
	}
	buf, err := ioutil.ReadFile(NotFoundPagePath)
	if err != nil {
		frpLog.Warn("read custom 404 page error: %v", err)
		return []byte(NotFound)
	}
	return buf
}
func notFoundResponse() *http.Response { func notFoundResponse() *http.Response {
header := make(http.Header) header := make(http.Header)
header.Set("server", "frp/"+version.Full()) header.Set("server", "frp/"+version.Full())
header.Set("Content-Type", "text/html") header.Set("Content-Type", "text/html")
res := &http.Response{ res := &http.Response{
Status: "Not Found", Status: "Not Found",
StatusCode: 404, StatusCode: 404,
@@ -57,7 +80,21 @@ func notFoundResponse() *http.Response {
ProtoMajor: 1, ProtoMajor: 1,
ProtoMinor: 0, ProtoMinor: 0,
Header: header, Header: header,
Body: ioutil.NopCloser(strings.NewReader(NotFound)), Body: ioutil.NopCloser(bytes.NewReader(getNotFoundPageContent())),
}
return res
}
func noAuthResponse() *http.Response {
header := make(map[string][]string)
header["WWW-Authenticate"] = []string{`Basic realm="Restricted"`}
res := &http.Response{
Status: "401 Not authorized",
StatusCode: 401,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: header,
} }
return res return res
} }

View File

@@ -8,6 +8,7 @@ package vhost
import ( import (
"context" "context"
"fmt"
"io" "io"
"log" "log"
"net" "net"
@@ -17,13 +18,9 @@ import (
"sync" "sync"
"time" "time"
frpIo "github.com/fatedier/golib/io" "golang.org/x/net/http/httpguts"
) )
// onExitFlushLoop is a callback set by tests to detect the state of the
// flushLoop() goroutine.
var onExitFlushLoop func()
// ReverseProxy is an HTTP Handler that takes an incoming request and // ReverseProxy is an HTTP Handler that takes an incoming request and
// sends it to another server, proxying the response back to the // sends it to another server, proxying the response back to the
// client. // client.
@@ -44,12 +41,17 @@ type ReverseProxy struct {
// to flush to the client while copying the // to flush to the client while copying the
// response body. // response body.
// If zero, no periodic flushing is done. // If zero, no periodic flushing is done.
// A negative value means to flush immediately
// after each write to the client.
// The FlushInterval is ignored when ReverseProxy
// recognizes a response as a streaming response;
// for such responses, writes are flushed to the client
// immediately.
FlushInterval time.Duration FlushInterval time.Duration
// ErrorLog specifies an optional logger for errors // ErrorLog specifies an optional logger for errors
// that occur when attempting to proxy the request. // that occur when attempting to proxy the request.
// If nil, logging goes to os.Stderr via the log package's // If nil, logging is done via the log package's standard logger.
// standard logger.
ErrorLog *log.Logger ErrorLog *log.Logger
// BufferPool optionally specifies a buffer pool to // BufferPool optionally specifies a buffer pool to
@@ -57,12 +59,23 @@ type ReverseProxy struct {
// copying HTTP response bodies. // copying HTTP response bodies.
BufferPool BufferPool BufferPool BufferPool
// ModifyResponse is an optional function that // ModifyResponse is an optional function that modifies the
// modifies the Response from the backend. // Response from the backend. It is called if the backend
// If it returns an error, the proxy returns a StatusBadGateway error. // returns a response at all, with any HTTP status code.
// If the backend is unreachable, the optional ErrorHandler is
// called without any call to ModifyResponse.
//
// If ModifyResponse returns an error, ErrorHandler is called
// with its error value. If ErrorHandler is nil, its default
// implementation is used.
ModifyResponse func(*http.Response) error ModifyResponse func(*http.Response) error
WebSocketDialContext func(ctx context.Context, network, addr string) (net.Conn, error) // ErrorHandler is an optional function that handles errors
// reaching the backend or errors from ModifyResponse.
//
// If nil, the default is to log the provided error and return
// a 502 Status Bad Gateway response.
ErrorHandler func(http.ResponseWriter, *http.Request, error)
} }
// A BufferPool is an interface for getting and returning temporary // A BufferPool is an interface for getting and returning temporary
@@ -118,18 +131,11 @@ func copyHeader(dst, src http.Header) {
} }
} }
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
return h2
}
// Hop-by-hop headers. These are removed when sent to the backend. // Hop-by-hop headers. These are removed when sent to the backend.
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html // As of RFC 7230, hop-by-hop headers are required to appear in the
// Connection header field. These are the headers defined by the
// obsoleted RFC 2616 (section 13.5.1) and are used for backward
// compatibility.
var hopHeaders = []string{ var hopHeaders = []string{
"Connection", "Connection",
"Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
@@ -137,54 +143,38 @@ var hopHeaders = []string{
"Proxy-Authenticate", "Proxy-Authenticate",
"Proxy-Authorization", "Proxy-Authorization",
"Te", // canonicalized version of "TE" "Te", // canonicalized version of "TE"
"Trailer", // not Trailers per URL above; http://www.rfc-editor.org/errata_search.php?eid=4522 "Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
"Transfer-Encoding", "Transfer-Encoding",
"Upgrade", "Upgrade",
} }
func (p *ReverseProxy) defaultErrorHandler(rw http.ResponseWriter, req *http.Request, err error) {
p.logf("http: proxy error: %v", err)
rw.WriteHeader(http.StatusBadGateway)
}
func (p *ReverseProxy) getErrorHandler() func(http.ResponseWriter, *http.Request, error) {
if p.ErrorHandler != nil {
return p.ErrorHandler
}
return p.defaultErrorHandler
}
// modifyResponse conditionally runs the optional ModifyResponse hook
// and reports whether the request should proceed.
func (p *ReverseProxy) modifyResponse(rw http.ResponseWriter, res *http.Response, req *http.Request) bool {
if p.ModifyResponse == nil {
return true
}
if err := p.ModifyResponse(res); err != nil {
res.Body.Close()
p.getErrorHandler()(rw, req, err)
return false
}
return true
}
func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
if IsWebsocketRequest(req) {
p.serveWebSocket(rw, req)
} else {
p.serveHTTP(rw, req)
}
}
func (p *ReverseProxy) serveWebSocket(rw http.ResponseWriter, req *http.Request) {
if p.WebSocketDialContext == nil {
rw.WriteHeader(500)
return
}
req = req.WithContext(context.WithValue(req.Context(), "url", req.URL.Path))
req = req.WithContext(context.WithValue(req.Context(), "host", req.Host))
targetConn, err := p.WebSocketDialContext(req.Context(), "tcp", "")
if err != nil {
rw.WriteHeader(501)
return
}
defer targetConn.Close()
p.Director(req)
hijacker, ok := rw.(http.Hijacker)
if !ok {
rw.WriteHeader(500)
return
}
conn, _, errHijack := hijacker.Hijack()
if errHijack != nil {
rw.WriteHeader(500)
return
}
defer conn.Close()
req.Write(targetConn)
frpIo.Join(conn, targetConn)
}
func (p *ReverseProxy) serveHTTP(rw http.ResponseWriter, req *http.Request) {
transport := p.Transport transport := p.Transport
if transport == nil { if transport == nil {
transport = http.DefaultTransport transport = http.DefaultTransport
@@ -205,37 +195,49 @@ func (p *ReverseProxy) serveHTTP(rw http.ResponseWriter, req *http.Request) {
}() }()
} }
outreq := req.WithContext(ctx) // includes shallow copies of maps, but okay outreq := req.WithContext(ctx)
if req.ContentLength == 0 { if req.ContentLength == 0 {
outreq.Body = nil // Issue 16036: nil Body for http.Transport retries outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
} }
outreq.Header = cloneHeader(req.Header) // =============================
// Modified for frp
// Modify for frp
outreq = outreq.WithContext(context.WithValue(outreq.Context(), "url", req.URL.Path)) outreq = outreq.WithContext(context.WithValue(outreq.Context(), "url", req.URL.Path))
outreq = outreq.WithContext(context.WithValue(outreq.Context(), "host", req.Host)) outreq = outreq.WithContext(context.WithValue(outreq.Context(), "host", req.Host))
outreq = outreq.WithContext(context.WithValue(outreq.Context(), "remote", req.RemoteAddr))
// =============================
p.Director(outreq) p.Director(outreq)
outreq.Close = false outreq.Close = false
// Remove hop-by-hop headers listed in the "Connection" header. reqUpType := upgradeType(outreq.Header)
// See RFC 2616, section 14.10. removeConnectionHeaders(outreq.Header)
if c := outreq.Header.Get("Connection"); c != "" {
for _, f := range strings.Split(c, ",") {
if f = strings.TrimSpace(f); f != "" {
outreq.Header.Del(f)
}
}
}
// Remove hop-by-hop headers to the backend. Especially // Remove hop-by-hop headers to the backend. Especially
// important is "Connection" because we want a persistent // important is "Connection" because we want a persistent
// connection, regardless of what the client sent to us. // connection, regardless of what the client sent to us.
for _, h := range hopHeaders { for _, h := range hopHeaders {
if outreq.Header.Get(h) != "" { hv := outreq.Header.Get(h)
outreq.Header.Del(h) if hv == "" {
continue
} }
if h == "Te" && hv == "trailers" {
// Issue 21096: tell backend applications that
// care about trailer support that we support
// trailers. (We do, but we don't go out of
// our way to advertise that unless the
// incoming client request thought it was
// worth mentioning)
continue
}
outreq.Header.Del(h)
}
// After stripping all the hop-by-hop connection headers above, add back any
// necessary for protocol upgrades, such as for websockets.
if reqUpType != "" {
outreq.Header.Set("Connection", "Upgrade")
outreq.Header.Set("Upgrade", reqUpType)
} }
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
@@ -250,32 +252,27 @@ func (p *ReverseProxy) serveHTTP(rw http.ResponseWriter, req *http.Request) {
res, err := transport.RoundTrip(outreq) res, err := transport.RoundTrip(outreq)
if err != nil { if err != nil {
p.logf("http: proxy error: %v", err) p.getErrorHandler()(rw, outreq, err)
rw.WriteHeader(http.StatusNotFound)
rw.Write([]byte(NotFound))
return return
} }
// Remove hop-by-hop headers listed in the // Deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
// "Connection" header of the response. if res.StatusCode == http.StatusSwitchingProtocols {
if c := res.Header.Get("Connection"); c != "" { if !p.modifyResponse(rw, res, outreq) {
for _, f := range strings.Split(c, ",") { return
if f = strings.TrimSpace(f); f != "" {
res.Header.Del(f)
}
} }
p.handleUpgradeResponse(rw, outreq, res)
return
} }
removeConnectionHeaders(res.Header)
for _, h := range hopHeaders { for _, h := range hopHeaders {
res.Header.Del(h) res.Header.Del(h)
} }
if p.ModifyResponse != nil { if !p.modifyResponse(rw, res, outreq) {
if err := p.ModifyResponse(res); err != nil { return
p.logf("http: proxy error: %v", err)
rw.WriteHeader(http.StatusBadGateway)
return
}
} }
copyHeader(rw.Header(), res.Header) copyHeader(rw.Header(), res.Header)
@@ -292,6 +289,21 @@ func (p *ReverseProxy) serveHTTP(rw http.ResponseWriter, req *http.Request) {
} }
rw.WriteHeader(res.StatusCode) rw.WriteHeader(res.StatusCode)
err = p.copyResponse(rw, res.Body, p.flushInterval(req, res))
if err != nil {
defer res.Body.Close()
// Since we're streaming the response, if we run into an error all we can do
// is abort the request. Issue 23643: ReverseProxy should use ErrAbortHandler
// on read error while copying body.
if !shouldPanicOnCopyError(req) {
p.logf("suppressing panic for copyResponse error in test; copy error: %v", err)
return
}
panic(http.ErrAbortHandler)
}
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if len(res.Trailer) > 0 { if len(res.Trailer) > 0 {
// Force chunking if we saw a response trailer. // Force chunking if we saw a response trailer.
// This prevents net/http from calculating the length for short // This prevents net/http from calculating the length for short
@@ -300,8 +312,6 @@ func (p *ReverseProxy) serveHTTP(rw http.ResponseWriter, req *http.Request) {
fl.Flush() fl.Flush()
} }
} }
p.copyResponse(rw, res.Body)
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if len(res.Trailer) == announcedTrailers { if len(res.Trailer) == announcedTrailers {
copyHeader(rw.Header(), res.Trailer) copyHeader(rw.Header(), res.Trailer)
@@ -316,16 +326,68 @@ func (p *ReverseProxy) serveHTTP(rw http.ResponseWriter, req *http.Request) {
} }
} }
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) { var inOurTests bool // whether we're in our own tests
if p.FlushInterval != 0 {
// shouldPanicOnCopyError reports whether the reverse proxy should
// panic with http.ErrAbortHandler. This is the right thing to do by
// default, but Go 1.10 and earlier did not, so existing unit tests
// weren't expecting panics. Only panic in our own tests, or when
// running under the HTTP server.
func shouldPanicOnCopyError(req *http.Request) bool {
if inOurTests {
// Our tests know to handle this panic.
return true
}
if req.Context().Value(http.ServerContextKey) != nil {
// We seem to be running under an HTTP server, so
// it'll recover the panic.
return true
}
// Otherwise act like Go 1.10 and earlier to not break
// existing tests.
return false
}
// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h.
// See RFC 7230, section 6.1
func removeConnectionHeaders(h http.Header) {
for _, f := range h["Connection"] {
for _, sf := range strings.Split(f, ",") {
if sf = strings.TrimSpace(sf); sf != "" {
h.Del(sf)
}
}
}
}
// flushInterval returns the p.FlushInterval value, conditionally
// overriding its value for a specific request/response.
func (p *ReverseProxy) flushInterval(req *http.Request, res *http.Response) time.Duration {
resCT := res.Header.Get("Content-Type")
// For Server-Sent Events responses, flush immediately.
// The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream
if resCT == "text/event-stream" {
return -1 // negative means immediately
}
// TODO: more specific cases? e.g. res.ContentLength == -1?
return p.FlushInterval
}
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error {
if flushInterval != 0 {
if wf, ok := dst.(writeFlusher); ok { if wf, ok := dst.(writeFlusher); ok {
mlw := &maxLatencyWriter{ mlw := &maxLatencyWriter{
dst: wf, dst: wf,
latency: p.FlushInterval, latency: flushInterval,
done: make(chan bool),
} }
go mlw.flushLoop()
defer mlw.stop() defer mlw.stop()
// set up initial timer so headers get flushed even if body writes are delayed
mlw.flushPending = true
mlw.t = time.AfterFunc(flushInterval, mlw.delayedFlush)
dst = mlw dst = mlw
} }
} }
@@ -333,13 +395,14 @@ func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
var buf []byte var buf []byte
if p.BufferPool != nil { if p.BufferPool != nil {
buf = p.BufferPool.Get() buf = p.BufferPool.Get()
defer p.BufferPool.Put(buf)
} }
p.copyBuffer(dst, src, buf) _, err := p.copyBuffer(dst, src, buf)
if p.BufferPool != nil { return err
p.BufferPool.Put(buf)
}
} }
// copyBuffer returns any write errors or non-EOF read errors, and the amount
// of bytes written.
func (p *ReverseProxy) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) { func (p *ReverseProxy) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
if len(buf) == 0 { if len(buf) == 0 {
buf = make([]byte, 32*1024) buf = make([]byte, 32*1024)
@@ -363,6 +426,9 @@ func (p *ReverseProxy) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int
} }
} }
if rerr != nil { if rerr != nil {
if rerr == io.EOF {
rerr = nil
}
return written, rerr return written, rerr
} }
} }
@@ -383,47 +449,115 @@ type writeFlusher interface {
type maxLatencyWriter struct { type maxLatencyWriter struct {
dst writeFlusher dst writeFlusher
latency time.Duration latency time.Duration // non-zero; negative means to flush immediately
mu sync.Mutex // protects Write + Flush mu sync.Mutex // protects t, flushPending, and dst.Flush
done chan bool t *time.Timer
flushPending bool
} }
func (m *maxLatencyWriter) Write(p []byte) (int, error) { func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
m.mu.Lock() m.mu.Lock()
defer m.mu.Unlock() defer m.mu.Unlock()
return m.dst.Write(p) n, err = m.dst.Write(p)
if m.latency < 0 {
m.dst.Flush()
return
}
if m.flushPending {
return
}
if m.t == nil {
m.t = time.AfterFunc(m.latency, m.delayedFlush)
} else {
m.t.Reset(m.latency)
}
m.flushPending = true
return
} }
func (m *maxLatencyWriter) flushLoop() { func (m *maxLatencyWriter) delayedFlush() {
t := time.NewTicker(m.latency) m.mu.Lock()
defer t.Stop() defer m.mu.Unlock()
for { if !m.flushPending { // if stop was called but AfterFunc already started this goroutine
select { return
case <-m.done: }
if onExitFlushLoop != nil { m.dst.Flush()
onExitFlushLoop() m.flushPending = false
} }
return
case <-t.C: func (m *maxLatencyWriter) stop() {
m.mu.Lock() m.mu.Lock()
m.dst.Flush() defer m.mu.Unlock()
m.mu.Unlock() m.flushPending = false
} if m.t != nil {
m.t.Stop()
} }
} }
func (m *maxLatencyWriter) stop() { m.done <- true } func upgradeType(h http.Header) string {
if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
func IsWebsocketRequest(req *http.Request) bool { return ""
containsHeader := func(name, value string) bool {
items := strings.Split(req.Header.Get(name), ",")
for _, item := range items {
if value == strings.ToLower(strings.TrimSpace(item)) {
return true
}
}
return false
} }
return containsHeader("Connection", "upgrade") && containsHeader("Upgrade", "websocket") return strings.ToLower(h.Get("Upgrade"))
}
func (p *ReverseProxy) handleUpgradeResponse(rw http.ResponseWriter, req *http.Request, res *http.Response) {
reqUpType := upgradeType(req.Header)
resUpType := upgradeType(res.Header)
if reqUpType != resUpType {
p.getErrorHandler()(rw, req, fmt.Errorf("backend tried to switch protocol %q when %q was requested", resUpType, reqUpType))
return
}
copyHeader(res.Header, rw.Header())
hj, ok := rw.(http.Hijacker)
if !ok {
p.getErrorHandler()(rw, req, fmt.Errorf("can't switch protocols using non-Hijacker ResponseWriter type %T", rw))
return
}
backConn, ok := res.Body.(io.ReadWriteCloser)
if !ok {
p.getErrorHandler()(rw, req, fmt.Errorf("internal error: 101 switching protocols response with non-writable body"))
return
}
defer backConn.Close()
conn, brw, err := hj.Hijack()
if err != nil {
p.getErrorHandler()(rw, req, fmt.Errorf("Hijack failed on protocol switch: %v", err))
return
}
defer conn.Close()
res.Body = nil // so res.Write only writes the headers; we have res.Body in backConn above
if err := res.Write(brw); err != nil {
p.getErrorHandler()(rw, req, fmt.Errorf("response write: %v", err))
return
}
if err := brw.Flush(); err != nil {
p.getErrorHandler()(rw, req, fmt.Errorf("response flush: %v", err))
return
}
errc := make(chan error, 1)
spc := switchProtocolCopier{user: conn, backend: backConn}
go spc.copyToBackend(errc)
go spc.copyFromBackend(errc)
<-errc
return
}
// switchProtocolCopier exists so goroutines proxying data back and
// forth have nice names in stacks.
type switchProtocolCopier struct {
user, backend io.ReadWriter
}
func (c switchProtocolCopier) copyFromBackend(errc chan<- error) {
_, err := io.Copy(c.user, c.backend)
errc <- err
}
func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
_, err := io.Copy(c.backend, c.user)
errc <- err
} }

View File

@@ -1,11 +1,16 @@
package vhost package vhost
import ( import (
"errors"
"sort" "sort"
"strings" "strings"
"sync" "sync"
) )
var (
ErrRouterConfigConflict = errors.New("router config conflict")
)
type VhostRouters struct { type VhostRouters struct {
RouterByDomain map[string][]*VhostRouter RouterByDomain map[string][]*VhostRouter
mutex sync.RWMutex mutex sync.RWMutex
@@ -24,10 +29,14 @@ func NewVhostRouters() *VhostRouters {
} }
} }
func (r *VhostRouters) Add(domain, location string, payload interface{}) { func (r *VhostRouters) Add(domain, location string, payload interface{}) error {
r.mutex.Lock() r.mutex.Lock()
defer r.mutex.Unlock() defer r.mutex.Unlock()
if _, exist := r.exist(domain, location); exist {
return ErrRouterConfigConflict
}
vrs, found := r.RouterByDomain[domain] vrs, found := r.RouterByDomain[domain]
if !found { if !found {
vrs = make([]*VhostRouter, 0, 1) vrs = make([]*VhostRouter, 0, 1)
@@ -42,6 +51,7 @@ func (r *VhostRouters) Add(domain, location string, payload interface{}) {
sort.Sort(sort.Reverse(ByLocation(vrs))) sort.Sort(sort.Reverse(ByLocation(vrs)))
r.RouterByDomain[domain] = vrs r.RouterByDomain[domain] = vrs
return nil
} }
func (r *VhostRouters) Del(domain, location string) { func (r *VhostRouters) Del(domain, location string) {
@@ -80,10 +90,7 @@ func (r *VhostRouters) Get(host, path string) (vr *VhostRouter, exist bool) {
return return
} }
func (r *VhostRouters) Exist(host, path string) (vr *VhostRouter, exist bool) { func (r *VhostRouters) exist(host, path string) (vr *VhostRouter, exist bool) {
r.mutex.RLock()
defer r.mutex.RUnlock()
vrs, found := r.RouterByDomain[host] vrs, found := r.RouterByDomain[host]
if !found { if !found {
return return

View File

@@ -15,7 +15,6 @@ package vhost
import ( import (
"fmt" "fmt"
"strings" "strings"
"sync"
"time" "time"
"github.com/fatedier/frp/utils/log" "github.com/fatedier/frp/utils/log"
@@ -35,7 +34,6 @@ type VhostMuxer struct {
authFunc httpAuthFunc authFunc httpAuthFunc
rewriteFunc hostRewriteFunc rewriteFunc hostRewriteFunc
registryRouter *VhostRouters registryRouter *VhostRouters
mutex sync.RWMutex
} }
func NewVhostMuxer(listener frpNet.Listener, vhostFunc muxFunc, authFunc httpAuthFunc, rewriteFunc hostRewriteFunc, timeout time.Duration) (mux *VhostMuxer, err error) { func NewVhostMuxer(listener frpNet.Listener, vhostFunc muxFunc, authFunc httpAuthFunc, rewriteFunc hostRewriteFunc, timeout time.Duration) (mux *VhostMuxer, err error) {
@@ -51,8 +49,9 @@ func NewVhostMuxer(listener frpNet.Listener, vhostFunc muxFunc, authFunc httpAut
return mux, nil return mux, nil
} }
type CreateConnFunc func() (frpNet.Conn, error) type CreateConnFunc func(remoteAddr string) (frpNet.Conn, error)
// VhostRouteConfig is the params used to match HTTP requests
type VhostRouteConfig struct { type VhostRouteConfig struct {
Domain string Domain string
Location string Location string
@@ -67,14 +66,6 @@ type VhostRouteConfig struct {
// listen for a new domain name, if rewriteHost is not empty and rewriteFunc is not nil // listen for a new domain name, if rewriteHost is not empty and rewriteFunc is not nil
// then rewrite the host header to rewriteHost // then rewrite the host header to rewriteHost
func (v *VhostMuxer) Listen(cfg *VhostRouteConfig) (l *Listener, err error) { func (v *VhostMuxer) Listen(cfg *VhostRouteConfig) (l *Listener, err error) {
v.mutex.Lock()
defer v.mutex.Unlock()
_, ok := v.registryRouter.Exist(cfg.Domain, cfg.Location)
if ok {
return nil, fmt.Errorf("hostname [%s] location [%s] is already registered", cfg.Domain, cfg.Location)
}
l = &Listener{ l = &Listener{
name: cfg.Domain, name: cfg.Domain,
location: cfg.Location, location: cfg.Location,
@@ -85,14 +76,14 @@ func (v *VhostMuxer) Listen(cfg *VhostRouteConfig) (l *Listener, err error) {
accept: make(chan frpNet.Conn), accept: make(chan frpNet.Conn),
Logger: log.NewPrefixLogger(""), Logger: log.NewPrefixLogger(""),
} }
v.registryRouter.Add(cfg.Domain, cfg.Location, l) err = v.registryRouter.Add(cfg.Domain, cfg.Location, l)
if err != nil {
return
}
return l, nil return l, nil
} }
func (v *VhostMuxer) getListener(name, path string) (l *Listener, exist bool) { func (v *VhostMuxer) getListener(name, path string) (l *Listener, exist bool) {
v.mutex.RLock()
defer v.mutex.RUnlock()
// first we check the full hostname // first we check the full hostname
// if not exist, then check the wildcard_domain such as *.example.com // if not exist, then check the wildcard_domain such as *.example.com
vr, found := v.registryRouter.Get(name, path) vr, found := v.registryRouter.Get(name, path)
@@ -102,17 +93,24 @@ func (v *VhostMuxer) getListener(name, path string) (l *Listener, exist bool) {
domainSplit := strings.Split(name, ".") domainSplit := strings.Split(name, ".")
if len(domainSplit) < 3 { if len(domainSplit) < 3 {
return l, false
}
domainSplit[0] = "*"
name = strings.Join(domainSplit, ".")
vr, found = v.registryRouter.Get(name, path)
if !found {
return return
} }
return vr.payload.(*Listener), true for {
if len(domainSplit) < 3 {
return
}
domainSplit[0] = "*"
name = strings.Join(domainSplit, ".")
vr, found = v.registryRouter.Get(name, path)
if found {
return vr.payload.(*Listener), true
}
domainSplit = domainSplit[1:]
}
return
} }
func (v *VhostMuxer) run() { func (v *VhostMuxer) run() {

View File

@@ -1,6 +1,8 @@
language: go language: go
go: go:
- 1.9 - 1.9.x
- 1.10.x
- 1.11.x
before_install: before_install:
- go get -t -v ./... - go get -t -v ./...

View File

@@ -20,24 +20,21 @@
**kcp-go** is a **Production-Grade Reliable-UDP** library for [golang](https://golang.org/). **kcp-go** is a **Production-Grade Reliable-UDP** library for [golang](https://golang.org/).
It provides **fast, ordered and error-checked** delivery of streams over **UDP** packets, has been well tested with opensource project [kcptun](https://github.com/xtaci/kcptun). Millions of devices(from low-end MIPS routers to high-end servers) are running with **kcp-go** at present, including applications like **online games, live broadcasting, file synchronization and network acceleration**. This library intents to provide a **smooth, resilient, ordered, error-checked and anonymous** delivery of streams over **UDP** packets, it has been battle-tested with opensource project [kcptun](https://github.com/xtaci/kcptun). Millions of devices(from low-end MIPS routers to high-end servers) have deployed **kcp-go** powered program in a variety of forms like **online games, live broadcasting, file synchronization and network acceleration**.
[Latest Release](https://github.com/xtaci/kcp-go/releases) [Latest Release](https://github.com/xtaci/kcp-go/releases)
## Features ## Features
1. Optimized for **Realtime Online Games, Audio/Video Streaming and Latency-Sensitive Distributed Consensus**. 1. Designed for **Latency-sensitive** scenarios.
1. Compatible with [skywind3000's](https://github.com/skywind3000) C version with language specific optimizations.
1. **Cache friendly** and **Memory optimized** design, offers extremely **High Performance** core. 1. **Cache friendly** and **Memory optimized** design, offers extremely **High Performance** core.
1. Handles **>5K concurrent connections** on a single commodity server. 1. Handles **>5K concurrent connections** on a single commodity server.
1. Compatible with [net.Conn](https://golang.org/pkg/net/#Conn) and [net.Listener](https://golang.org/pkg/net/#Listener), a drop-in replacement for [net.TCPConn](https://golang.org/pkg/net/#TCPConn). 1. Compatible with [net.Conn](https://golang.org/pkg/net/#Conn) and [net.Listener](https://golang.org/pkg/net/#Listener), a drop-in replacement for [net.TCPConn](https://golang.org/pkg/net/#TCPConn).
1. [FEC(Forward Error Correction)](https://en.wikipedia.org/wiki/Forward_error_correction) Support with [Reed-Solomon Codes](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction) 1. [FEC(Forward Error Correction)](https://en.wikipedia.org/wiki/Forward_error_correction) Support with [Reed-Solomon Codes](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction)
1. Packet level encryption support with [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard), [TEA](https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm), [3DES](https://en.wikipedia.org/wiki/Triple_DES), [Blowfish](https://en.wikipedia.org/wiki/Blowfish_(cipher)), [Cast5](https://en.wikipedia.org/wiki/CAST-128), [Salsa20]( https://en.wikipedia.org/wiki/Salsa20), etc. in [CFB](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_.28CFB.29) mode. 1. Packet level encryption support with [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard), [TEA](https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm), [3DES](https://en.wikipedia.org/wiki/Triple_DES), [Blowfish](https://en.wikipedia.org/wiki/Blowfish_(cipher)), [Cast5](https://en.wikipedia.org/wiki/CAST-128), [Salsa20]( https://en.wikipedia.org/wiki/Salsa20), etc. in [CFB](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_.28CFB.29) mode, which generates completely anonymous packet.
1. **Fixed number of goroutines** created for the entire server application, minimized goroutine context switch. 1. Only **A fixed number of goroutines** will be created for the entire server application, costs in **context switch** between goroutines have been taken into consideration.
1. Compatible with [skywind3000's](https://github.com/skywind3000) C version with various improvements.
## Conventions 1. Platform-dependent optimizations: [sendmmsg](http://man7.org/linux/man-pages/man2/sendmmsg.2.html) and [recvmmsg](http://man7.org/linux/man-pages/man2/recvmmsg.2.html) were exploited for Linux.
Control messages like **SYN/FIN/RST** in TCP **are not defined** in KCP, you need some **keepalive/heartbeat mechanism** in the application-level. A real world example is to use some **multiplexing** protocol over session, such as [smux](https://github.com/xtaci/smux)(with embedded keepalive mechanism), see [kcptun](https://github.com/xtaci/kcptun) for example.
## Documentation ## Documentation
@@ -47,6 +44,24 @@ For complete documentation, see the associated [Godoc](https://godoc.org/github.
<img src="frame.png" alt="Frame Format" height="109px" /> <img src="frame.png" alt="Frame Format" height="109px" />
```
NONCE:
16bytes cryptographically secure random number, nonce changes for every packet.
CRC32:
CRC-32 checksum of data using the IEEE polynomial
FEC TYPE:
typeData = 0xF1
typeParity = 0xF2
FEC SEQID:
monotonically increasing in range: [0, (0xffffffff/shardSize) * shardSize - 1]
SIZE:
The size of KCP frame plus 2
```
``` ```
+-----------------+ +-----------------+
| SESSION | | SESSION |
@@ -69,58 +84,69 @@ For complete documentation, see the associated [Godoc](https://godoc.org/github.
``` ```
## Usage ## Examples
Client: [full demo](https://github.com/xtaci/kcptun/blob/master/client/main.go) 1. [simple examples](https://github.com/xtaci/kcp-go/tree/master/examples)
```go 2. [kcptun client](https://github.com/xtaci/kcptun/blob/master/client/main.go)
kcpconn, err := kcp.DialWithOptions("192.168.0.1:10000", nil, 10, 3) 3. [kcptun server](https://github.com/xtaci/kcptun/blob/master/server/main.go)
```
Server: [full demo](https://github.com/xtaci/kcptun/blob/master/server/main.go)
```go
lis, err := kcp.ListenWithOptions(":10000", nil, 10, 3)
```
## Performance ## Benchmark
``` ```
Model Name: MacBook Pro Model Name: MacBook Pro
Model Identifier: MacBookPro12,1 Model Identifier: MacBookPro14,1
Processor Name: Intel Core i5 Processor Name: Intel Core i5
Processor Speed: 2.7 GHz Processor Speed: 3.1 GHz
Number of Processors: 1 Number of Processors: 1
Total Number of Cores: 2 Total Number of Cores: 2
L2 Cache (per Core): 256 KB L2 Cache (per Core): 256 KB
L3 Cache: 3 MB L3 Cache: 4 MB
Memory: 8 GB Memory: 8 GB
``` ```
``` ```
$ go test -v -run=^$ -bench . $ go test -v -run=^$ -bench .
beginning tests, encryption:salsa20, fec:10/3 beginning tests, encryption:salsa20, fec:10/3
BenchmarkAES128-4 200000 8256 ns/op 363.33 MB/s 0 B/op 0 allocs/op goos: darwin
BenchmarkAES192-4 200000 9153 ns/op 327.74 MB/s 0 B/op 0 allocs/op goarch: amd64
BenchmarkAES256-4 200000 10079 ns/op 297.64 MB/s 0 B/op 0 allocs/op pkg: github.com/xtaci/kcp-go
BenchmarkTEA-4 100000 18643 ns/op 160.91 MB/s 0 B/op 0 allocs/op BenchmarkSM4-4 50000 32180 ns/op 93.23 MB/s 0 B/op 0 allocs/op
BenchmarkXOR-4 5000000 316 ns/op 9486.46 MB/s 0 B/op 0 allocs/op BenchmarkAES128-4 500000 3285 ns/op 913.21 MB/s 0 B/op 0 allocs/op
BenchmarkBlowfish-4 50000 35643 ns/op 84.17 MB/s 0 B/op 0 allocs/op BenchmarkAES192-4 300000 3623 ns/op 827.85 MB/s 0 B/op 0 allocs/op
BenchmarkNone-4 30000000 56.2 ns/op 53371.83 MB/s 0 B/op 0 allocs/op BenchmarkAES256-4 300000 3874 ns/op 774.20 MB/s 0 B/op 0 allocs/op
BenchmarkCast5-4 30000 44744 ns/op 67.05 MB/s 0 B/op 0 allocs/op BenchmarkTEA-4 100000 15384 ns/op 195.00 MB/s 0 B/op 0 allocs/op
Benchmark3DES-4 2000 639839 ns/op 4.69 MB/s 2 B/op 0 allocs/op BenchmarkXOR-4 20000000 89.9 ns/op 33372.00 MB/s 0 B/op 0 allocs/op
BenchmarkTwofish-4 30000 43368 ns/op 69.17 MB/s 0 B/op 0 allocs/op BenchmarkBlowfish-4 50000 26927 ns/op 111.41 MB/s 0 B/op 0 allocs/op
BenchmarkXTEA-4 30000 57673 ns/op 52.02 MB/s 0 B/op 0 allocs/op BenchmarkNone-4 30000000 45.7 ns/op 65597.94 MB/s 0 B/op 0 allocs/op
BenchmarkSalsa20-4 300000 3917 ns/op 765.80 MB/s 0 B/op 0 allocs/op BenchmarkCast5-4 50000 34258 ns/op 87.57 MB/s 0 B/op 0 allocs/op
BenchmarkFlush-4 10000000 226 ns/op 0 B/op 0 allocs/op Benchmark3DES-4 10000 117149 ns/op 25.61 MB/s 0 B/op 0 allocs/op
BenchmarkEchoSpeed4K-4 5000 300030 ns/op 13.65 MB/s 5672 B/op 177 allocs/op BenchmarkTwofish-4 50000 33538 ns/op 89.45 MB/s 0 B/op 0 allocs/op
BenchmarkEchoSpeed64K-4 500 3202335 ns/op 20.47 MB/s 73295 B/op 2198 allocs/op BenchmarkXTEA-4 30000 45666 ns/op 65.69 MB/s 0 B/op 0 allocs/op
BenchmarkEchoSpeed512K-4 50 24926924 ns/op 21.03 MB/s 659339 B/op 17602 allocs/op BenchmarkSalsa20-4 500000 3308 ns/op 906.76 MB/s 0 B/op 0 allocs/op
BenchmarkEchoSpeed1M-4 20 64857821 ns/op 16.17 MB/s 1772437 B/op 42869 allocs/op BenchmarkCRC32-4 20000000 65.2 ns/op 15712.43 MB/s
BenchmarkSinkSpeed4K-4 30000 50230 ns/op 81.54 MB/s 2058 B/op 48 allocs/op BenchmarkCsprngSystem-4 1000000 1150 ns/op 13.91 MB/s
BenchmarkSinkSpeed64K-4 2000 648718 ns/op 101.02 MB/s 31165 B/op 687 allocs/op BenchmarkCsprngMD5-4 10000000 145 ns/op 110.26 MB/s
BenchmarkSinkSpeed256K-4 300 4635905 ns/op 113.09 MB/s 286229 B/op 5516 allocs/op BenchmarkCsprngSHA1-4 10000000 158 ns/op 126.54 MB/s
BenchmarkSinkSpeed1M-4 200 9566933 ns/op 109.60 MB/s 463771 B/op 10701 allocs/op BenchmarkCsprngNonceMD5-4 10000000 153 ns/op 104.22 MB/s
BenchmarkCsprngNonceAES128-4 100000000 19.1 ns/op 837.81 MB/s
BenchmarkFECDecode-4 1000000 1119 ns/op 1339.61 MB/s 1606 B/op 2 allocs/op
BenchmarkFECEncode-4 2000000 832 ns/op 1801.83 MB/s 17 B/op 0 allocs/op
BenchmarkFlush-4 5000000 272 ns/op 0 B/op 0 allocs/op
BenchmarkEchoSpeed4K-4 5000 259617 ns/op 15.78 MB/s 5451 B/op 149 allocs/op
BenchmarkEchoSpeed64K-4 1000 1706084 ns/op 38.41 MB/s 56002 B/op 1604 allocs/op
BenchmarkEchoSpeed512K-4 100 14345505 ns/op 36.55 MB/s 482597 B/op 13045 allocs/op
BenchmarkEchoSpeed1M-4 30 34859104 ns/op 30.08 MB/s 1143773 B/op 27186 allocs/op
BenchmarkSinkSpeed4K-4 50000 31369 ns/op 130.57 MB/s 1566 B/op 30 allocs/op
BenchmarkSinkSpeed64K-4 5000 329065 ns/op 199.16 MB/s 21529 B/op 453 allocs/op
BenchmarkSinkSpeed256K-4 500 2373354 ns/op 220.91 MB/s 166332 B/op 3554 allocs/op
BenchmarkSinkSpeed1M-4 300 5117927 ns/op 204.88 MB/s 310378 B/op 6988 allocs/op
PASS PASS
ok _/Users/xtaci/.godeps/src/github.com/xtaci/kcp-go 39.689s ok github.com/xtaci/kcp-go 50.349s
``` ```
## Design Considerations
## Typical Flame Graph
![Flame Graph in kcptun](flame.png)
## Key Design Considerations
1. slice vs. container/list 1. slice vs. container/list
@@ -139,7 +165,9 @@ List structure introduces **heavy cache misses** compared to slice which owns be
2. Timing accuracy vs. syscall clock_gettime 2. Timing accuracy vs. syscall clock_gettime
Timing is **critical** to **RTT estimator**, inaccurate timing introduces false retransmissions in KCP, but calling `time.Now()` costs 42 cycles(10.5ns on 4GHz CPU, 15.6ns on my MacBook Pro 2.7GHz), the benchmark for time.Now(): Timing is **critical** to **RTT estimator**, inaccurate timing leads to false retransmissions in KCP, but calling `time.Now()` costs 42 cycles(10.5ns on 4GHz CPU, 15.6ns on my MacBook Pro 2.7GHz).
The benchmark for time.Now() lies here:
https://github.com/xtaci/notes/blob/master/golang/benchmark2/syscall_test.go https://github.com/xtaci/notes/blob/master/golang/benchmark2/syscall_test.go
@@ -147,14 +175,37 @@ https://github.com/xtaci/notes/blob/master/golang/benchmark2/syscall_test.go
BenchmarkNow-4 100000000 15.6 ns/op BenchmarkNow-4 100000000 15.6 ns/op
``` ```
In kcp-go, after each `kcp.output()` function call, current time will be updated upon return, and each `kcp.flush()` will get current time once. For most of the time, 5000 connections costs 5000 * 15.6ns = 78us(no packet needs to be sent by `kcp.output()`), as for 10MB/s data transfering with 1400 MTU, `kcp.output()` will be called around 7500 times and costs 117us for `time.Now()` in **every second**. In kcp-go, after each `kcp.output()` function call, current clock time will be updated upon return, and for a single `kcp.flush()` operation, current time will be queried from system once. For most of the time, 5000 connections costs 5000 * 15.6ns = 78us(a fixed cost while no packet needs to be sent), as for 10MB/s data transfering with 1400 MTU, `kcp.output()` will be called around 7500 times and costs 117us for `time.Now()` in **every second**.
3. Memory management
## Tuning Primary memory allocations are done from a global buffer pool xmit.Buf; in kcp-go, when we need to allocate some bytes, we can get them from that pool, and a fixed-capacity 1500-byte (mtuLimit) slice will be returned. The rx queue, tx queue and fec queue all receive bytes from there, and they return the bytes to the pool after use to prevent unnecessary zeroing of bytes. The pool mechanism maintains a high watermark for slice objects; these in-flight objects from the pool will survive the periodic garbage collection, while the pool keeps the ability to return the memory to the runtime if idle.
Q: I'm handling >5K connections on my server. the CPU utilization is high. 4. Information security
A: A standalone `agent` or `gate` server for kcp-go is suggested, not only for CPU utilization, but also important to the **precision** of RTT measurements which indirectly affects retransmission. By increasing update `interval` with `SetNoDelay` like `conn.SetNoDelay(1, 40, 1, 1)` will dramatically reduce system load. kcp-go is shipped with builtin packet encryption powered by various block encryption algorithms and works in [Cipher Feedback Mode](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_(CFB)), for each packet to be sent, the encryption process will start from encrypting a [nonce](https://en.wikipedia.org/wiki/Cryptographic_nonce) from the [system entropy](https://en.wikipedia.org/wiki//dev/random), so encryption to same plaintexts never leads to a same ciphertexts thereafter.
The contents of the packets are completely anonymous with encryption, including the headers(FEC,KCP), checksums and contents. Note that, no matter which encryption method you choose on you upper layer, if you disable encryption, the transmit will be insecure somehow, since the header is ***PLAINTEXT*** to everyone it would be susceptible to header tampering, such as jamming the *sliding window size*, *round-trip time*, *FEC property* and *checksums*. ```AES-128``` is suggested for minimal encryption since modern CPUs are shipped with [AES-NI](https://en.wikipedia.org/wiki/AES_instruction_set) instructions and performs even better than `salsa20`(check the table above).
Other possible attacks to kcp-go includes: a) [traffic analysis](https://en.wikipedia.org/wiki/Traffic_analysis), dataflow on specific websites may have pattern while interchanging data, but this type of eavesdropping has been mitigated by adapting [smux](https://github.com/xtaci/smux) to mix data streams so as to introduce noises, perfect solution to this has not appeared yet, theroretically by shuffling/mixing messages on larger scale network may mitigate this problem. b) [replay attack](https://en.wikipedia.org/wiki/Replay_attack), since the asymmetrical encryption has not been introduced into kcp-go for some reason, capturing the packets and replay them on a different machine is possible, (notice: hijacking the session and decrypting the contents is still *impossible*), so upper layers should contain a asymmetrical encryption system to guarantee the authenticity of each message(to process message exactly once), such as HTTPS/OpenSSL/LibreSSL, only by signing the requests with private keys can eliminate this type of attack.
## Connection Termination
Control messages like **SYN/FIN/RST** in TCP **are not defined** in KCP, you need some **keepalive/heartbeat mechanism** in the application-level. A real world example is to use some **multiplexing** protocol over session, such as [smux](https://github.com/xtaci/smux)(with embedded keepalive mechanism), see [kcptun](https://github.com/xtaci/kcptun) for example.
## FAQ
Q: I'm handling >5K connections on my server, the CPU utilization is so high.
A: A standalone `agent` or `gate` server for running kcp-go is suggested, not only for CPU utilization, but also important to the **precision** of RTT measurements(timing) which indirectly affects retransmission. By increasing update `interval` with `SetNoDelay` like `conn.SetNoDelay(1, 40, 1, 1)` will dramatically reduce system load, but lower the performance.
Q: When should I enable FEC?
A: Forward error correction is critical to long-distance transmission, because a packet loss will lead to a huge penalty in time. And for the complicated packet routing network in modern world, round-trip time based loss check will not always be efficient, the big deviation of RTT samples in the long way usually leads to a larger RTO value in typical rtt estimator, which in other words, slows down the transmission.
Q: Should I enable encryption?
A: Yes, for the safety of protocol, even if the upper layer has encrypted.
## Who is using this? ## Who is using this?
@@ -163,10 +214,9 @@ A: A standalone `agent` or `gate` server for kcp-go is suggested, not only for C
3. https://github.com/smallnest/rpcx -- A RPC service framework based on net/rpc like alibaba Dubbo and weibo Motan. 3. https://github.com/smallnest/rpcx -- A RPC service framework based on net/rpc like alibaba Dubbo and weibo Motan.
4. https://github.com/gonet2/agent -- A gateway for games with stream multiplexing. 4. https://github.com/gonet2/agent -- A gateway for games with stream multiplexing.
5. https://github.com/syncthing/syncthing -- Open Source Continuous File Synchronization. 5. https://github.com/syncthing/syncthing -- Open Source Continuous File Synchronization.
6. https://play.google.com/store/apps/details?id=com.k17game.k3 -- Battle Zone - Earth 2048, a world-wide strategy game.
## Links ## Links
1. https://github.com/xtaci/libkcp -- FEC enhanced KCP session library for iOS/Android in C++ 1. https://github.com/xtaci/libkcp -- FEC enhanced KCP session library for iOS/Android in C++
2. https://github.com/skywind3000/kcp -- A Fast and Reliable ARQ Protocol 2. https://github.com/skywind3000/kcp -- A Fast and Reliable ARQ Protocol
3. https://github.com/templexxx/reedsolomon -- Reed-Solomon Erasure Coding in Go 3. https://github.com/klauspost/reedsolomon -- Reed-Solomon Erasure Coding in Go

12
vendor/github.com/fatedier/kcp-go/batchconn.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
package kcp
import "golang.org/x/net/ipv4"
const (
// batchSize is the maximum number of ipv4.Messages passed to the kernel
// in one batched read or write through a batchConn.
batchSize = 16
)
// batchConn abstracts a connection that can move several packets per
// syscall — presumably backed by sendmmsg/recvmmsg on Linux (TODO
// confirm against the platform-specific implementations). Both methods
// return the number of messages actually processed.
type batchConn interface {
WriteBatch(ms []ipv4.Message, flags int) (int, error)
ReadBatch(ms []ipv4.Message, flags int) (int, error)
}

View File

@@ -57,8 +57,8 @@ func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
} }
type sm4BlockCrypt struct { type sm4BlockCrypt struct {
encbuf []byte encbuf [sm4.BlockSize]byte
decbuf []byte decbuf [2 * sm4.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -70,17 +70,15 @@ func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, sm4.BlockSize)
c.decbuf = make([]byte, 2*sm4.BlockSize)
return c, nil return c, nil
} }
func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type twofishBlockCrypt struct { type twofishBlockCrypt struct {
encbuf []byte encbuf [twofish.BlockSize]byte
decbuf []byte decbuf [2 * twofish.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -92,17 +90,15 @@ func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, twofish.BlockSize)
c.decbuf = make([]byte, 2*twofish.BlockSize)
return c, nil return c, nil
} }
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type tripleDESBlockCrypt struct { type tripleDESBlockCrypt struct {
encbuf []byte encbuf [des.BlockSize]byte
decbuf []byte decbuf [2 * des.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -114,17 +110,15 @@ func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, des.BlockSize)
c.decbuf = make([]byte, 2*des.BlockSize)
return c, nil return c, nil
} }
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type cast5BlockCrypt struct { type cast5BlockCrypt struct {
encbuf []byte encbuf [cast5.BlockSize]byte
decbuf []byte decbuf [2 * cast5.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -136,17 +130,15 @@ func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, cast5.BlockSize)
c.decbuf = make([]byte, 2*cast5.BlockSize)
return c, nil return c, nil
} }
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type blowfishBlockCrypt struct { type blowfishBlockCrypt struct {
encbuf []byte encbuf [blowfish.BlockSize]byte
decbuf []byte decbuf [2 * blowfish.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -158,17 +150,15 @@ func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, blowfish.BlockSize)
c.decbuf = make([]byte, 2*blowfish.BlockSize)
return c, nil return c, nil
} }
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type aesBlockCrypt struct { type aesBlockCrypt struct {
encbuf []byte encbuf [aes.BlockSize]byte
decbuf []byte decbuf [2 * aes.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -180,17 +170,15 @@ func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, aes.BlockSize)
c.decbuf = make([]byte, 2*aes.BlockSize)
return c, nil return c, nil
} }
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type teaBlockCrypt struct { type teaBlockCrypt struct {
encbuf []byte encbuf [tea.BlockSize]byte
decbuf []byte decbuf [2 * tea.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -202,17 +190,15 @@ func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, tea.BlockSize)
c.decbuf = make([]byte, 2*tea.BlockSize)
return c, nil return c, nil
} }
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type xteaBlockCrypt struct { type xteaBlockCrypt struct {
encbuf []byte encbuf [xtea.BlockSize]byte
decbuf []byte decbuf [2 * xtea.BlockSize]byte
block cipher.Block block cipher.Block
} }
@@ -224,13 +210,11 @@ func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
return nil, err return nil, err
} }
c.block = block c.block = block
c.encbuf = make([]byte, xtea.BlockSize)
c.decbuf = make([]byte, 2*xtea.BlockSize)
return c, nil return c, nil
} }
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf) } func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf) } func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type simpleXORBlockCrypt struct { type simpleXORBlockCrypt struct {
xortbl []byte xortbl []byte
@@ -258,31 +242,544 @@ func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
// packet encryption with local CFB mode // packet encryption with local CFB mode
func encrypt(block cipher.Block, dst, src, buf []byte) { func encrypt(block cipher.Block, dst, src, buf []byte) {
switch block.BlockSize() {
case 8:
encrypt8(block, dst, src, buf)
case 16:
encrypt16(block, dst, src, buf)
default:
encryptVariant(block, dst, src, buf)
}
}
// optimized encryption for the ciphers which works in 8-bytes
// encrypt8 runs the local CFB-style mode for ciphers with an 8-byte block
// size. buf must supply at least 8 bytes of scratch for the keystream.
// Note: indentation here reflects the original vendored formatting.
func encrypt8(block cipher.Block, dst, src, buf []byte) {
// tbl is the running keystream block: it starts as the encrypted IV and is
// re-derived from each freshly produced ciphertext block (CFB chaining).
tbl := buf[:8]
block.Encrypt(tbl, initialVector)
// n full 8-byte blocks in src; the main loop is unrolled 8x (64 bytes per
// iteration), the remainder drains through the fallthrough switch below.
n := len(src) / 8
base := 0
repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
// Full re-slices to exactly 64 bytes — likely to help bounds-check
// elimination in the unrolled body (TODO confirm with ssa/check_bce).
s := src[base:][0:64]
d := dst[base:][0:64]
// Each step below: dst = src XOR tbl (xor.BytesSrc1 — helper from the
// xor package, presumably XORing len(tbl) bytes), then re-key tbl by
// encrypting the just-written ciphertext block.
// 1
xor.BytesSrc1(d[0:8], s[0:8], tbl)
block.Encrypt(tbl, d[0:8])
// 2
xor.BytesSrc1(d[8:16], s[8:16], tbl)
block.Encrypt(tbl, d[8:16])
// 3
xor.BytesSrc1(d[16:24], s[16:24], tbl)
block.Encrypt(tbl, d[16:24])
// 4
xor.BytesSrc1(d[24:32], s[24:32], tbl)
block.Encrypt(tbl, d[24:32])
// 5
xor.BytesSrc1(d[32:40], s[32:40], tbl)
block.Encrypt(tbl, d[32:40])
// 6
xor.BytesSrc1(d[40:48], s[40:48], tbl)
block.Encrypt(tbl, d[40:48])
// 7
xor.BytesSrc1(d[48:56], s[48:56], tbl)
block.Encrypt(tbl, d[48:56])
// 8
xor.BytesSrc1(d[56:64], s[56:64], tbl)
block.Encrypt(tbl, d[56:64])
base += 64
}
// Drain the remaining `left` (0..7) full blocks: entering at case `left`
// and falling through executes exactly `left` encrypt steps, then case 0
// handles the final partial block (len(src)%8 tail bytes) via
// xor.BytesSrc0 without advancing the keystream.
switch left {
case 7:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 6:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 5:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 4:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 3:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 2:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 1:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 0:
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
}
// optimized encryption for the ciphers which works in 16-bytes
func encrypt16(block cipher.Block, dst, src, buf []byte) {
tbl := buf[:16]
block.Encrypt(tbl, initialVector)
n := len(src) / 16
base := 0
repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
s := src[base:][0:128]
d := dst[base:][0:128]
// 1
xor.BytesSrc1(d[0:16], s[0:16], tbl)
block.Encrypt(tbl, d[0:16])
// 2
xor.BytesSrc1(d[16:32], s[16:32], tbl)
block.Encrypt(tbl, d[16:32])
// 3
xor.BytesSrc1(d[32:48], s[32:48], tbl)
block.Encrypt(tbl, d[32:48])
// 4
xor.BytesSrc1(d[48:64], s[48:64], tbl)
block.Encrypt(tbl, d[48:64])
// 5
xor.BytesSrc1(d[64:80], s[64:80], tbl)
block.Encrypt(tbl, d[64:80])
// 6
xor.BytesSrc1(d[80:96], s[80:96], tbl)
block.Encrypt(tbl, d[80:96])
// 7
xor.BytesSrc1(d[96:112], s[96:112], tbl)
block.Encrypt(tbl, d[96:112])
// 8
xor.BytesSrc1(d[112:128], s[112:128], tbl)
block.Encrypt(tbl, d[112:128])
base += 128
}
switch left {
case 7:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 6:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 5:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 4:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 3:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 2:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 1:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 0:
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
}
func encryptVariant(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize() blocksize := block.BlockSize()
tbl := buf[:blocksize] tbl := buf[:blocksize]
block.Encrypt(tbl, initialVector) block.Encrypt(tbl, initialVector)
n := len(src) / blocksize n := len(src) / blocksize
base := 0 base := 0
for i := 0; i < n; i++ { repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
// 1
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 2
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 3
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 4
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 5
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 6
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 7
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
// 8
xor.BytesSrc1(dst[base:], src[base:], tbl) xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:]) block.Encrypt(tbl, dst[base:])
base += blocksize base += blocksize
} }
xor.BytesSrc0(dst[base:], src[base:], tbl)
switch left {
case 7:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 6:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 5:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 4:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 3:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 2:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 1:
xor.BytesSrc1(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += blocksize
fallthrough
case 0:
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
} }
// decryption
func decrypt(block cipher.Block, dst, src, buf []byte) { func decrypt(block cipher.Block, dst, src, buf []byte) {
switch block.BlockSize() {
case 8:
decrypt8(block, dst, src, buf)
case 16:
decrypt16(block, dst, src, buf)
default:
decryptVariant(block, dst, src, buf)
}
}
func decrypt8(block cipher.Block, dst, src, buf []byte) {
tbl := buf[0:8]
next := buf[8:16]
block.Encrypt(tbl, initialVector)
n := len(src) / 8
base := 0
repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
s := src[base:][0:64]
d := dst[base:][0:64]
// 1
block.Encrypt(next, s[0:8])
xor.BytesSrc1(d[0:8], s[0:8], tbl)
// 2
block.Encrypt(tbl, s[8:16])
xor.BytesSrc1(d[8:16], s[8:16], next)
// 3
block.Encrypt(next, s[16:24])
xor.BytesSrc1(d[16:24], s[16:24], tbl)
// 4
block.Encrypt(tbl, s[24:32])
xor.BytesSrc1(d[24:32], s[24:32], next)
// 5
block.Encrypt(next, s[32:40])
xor.BytesSrc1(d[32:40], s[32:40], tbl)
// 6
block.Encrypt(tbl, s[40:48])
xor.BytesSrc1(d[40:48], s[40:48], next)
// 7
block.Encrypt(next, s[48:56])
xor.BytesSrc1(d[48:56], s[48:56], tbl)
// 8
block.Encrypt(tbl, s[56:64])
xor.BytesSrc1(d[56:64], s[56:64], next)
base += 64
}
switch left {
case 7:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 6:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 5:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 4:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 3:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 2:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 1:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 8
fallthrough
case 0:
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
}
func decrypt16(block cipher.Block, dst, src, buf []byte) {
tbl := buf[0:16]
next := buf[16:32]
block.Encrypt(tbl, initialVector)
n := len(src) / 16
base := 0
repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
s := src[base:][0:128]
d := dst[base:][0:128]
// 1
block.Encrypt(next, s[0:16])
xor.BytesSrc1(d[0:16], s[0:16], tbl)
// 2
block.Encrypt(tbl, s[16:32])
xor.BytesSrc1(d[16:32], s[16:32], next)
// 3
block.Encrypt(next, s[32:48])
xor.BytesSrc1(d[32:48], s[32:48], tbl)
// 4
block.Encrypt(tbl, s[48:64])
xor.BytesSrc1(d[48:64], s[48:64], next)
// 5
block.Encrypt(next, s[64:80])
xor.BytesSrc1(d[64:80], s[64:80], tbl)
// 6
block.Encrypt(tbl, s[80:96])
xor.BytesSrc1(d[80:96], s[80:96], next)
// 7
block.Encrypt(next, s[96:112])
xor.BytesSrc1(d[96:112], s[96:112], tbl)
// 8
block.Encrypt(tbl, s[112:128])
xor.BytesSrc1(d[112:128], s[112:128], next)
base += 128
}
switch left {
case 7:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 6:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 5:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 4:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 3:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 2:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 1:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 0:
xor.BytesSrc0(dst[base:], src[base:], tbl)
}
}
func decryptVariant(block cipher.Block, dst, src, buf []byte) {
blocksize := block.BlockSize() blocksize := block.BlockSize()
tbl := buf[:blocksize] tbl := buf[:blocksize]
next := buf[blocksize:] next := buf[blocksize:]
block.Encrypt(tbl, initialVector) block.Encrypt(tbl, initialVector)
n := len(src) / blocksize n := len(src) / blocksize
base := 0 base := 0
for i := 0; i < n; i++ { repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
// 1
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
base += blocksize
// 2
block.Encrypt(tbl, src[base:])
xor.BytesSrc1(dst[base:], src[base:], next)
base += blocksize
// 3
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
base += blocksize
// 4
block.Encrypt(tbl, src[base:])
xor.BytesSrc1(dst[base:], src[base:], next)
base += blocksize
// 5
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
base += blocksize
// 6
block.Encrypt(tbl, src[base:])
xor.BytesSrc1(dst[base:], src[base:], next)
base += blocksize
// 7
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
base += blocksize
// 8
block.Encrypt(tbl, src[base:])
xor.BytesSrc1(dst[base:], src[base:], next)
base += blocksize
}
switch left {
case 7:
block.Encrypt(next, src[base:]) block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl) xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl tbl, next = next, tbl
base += blocksize base += blocksize
fallthrough
case 6:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
fallthrough
case 5:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
fallthrough
case 4:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
fallthrough
case 3:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
fallthrough
case 2:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
fallthrough
case 1:
block.Encrypt(next, src[base:])
xor.BytesSrc1(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += blocksize
fallthrough
case 0:
xor.BytesSrc0(dst[base:], src[base:], tbl)
} }
xor.BytesSrc0(dst[base:], src[base:], tbl)
} }

52
vendor/github.com/fatedier/kcp-go/entropy.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/md5"
"crypto/rand"
"io"
)
// Entropy defines a entropy source
type Entropy interface {
Init()
Fill(nonce []byte)
}
// nonceMD5 nonce generator for packet header
type nonceMD5 struct {
seed [md5.Size]byte
}
func (n *nonceMD5) Init() { /*nothing required*/ }
func (n *nonceMD5) Fill(nonce []byte) {
if n.seed[0] == 0 { // entropy update
io.ReadFull(rand.Reader, n.seed[:])
}
n.seed = md5.Sum(n.seed[:])
copy(nonce, n.seed[:])
}
// nonceAES128 nonce generator for packet headers
type nonceAES128 struct {
seed [aes.BlockSize]byte
block cipher.Block
}
func (n *nonceAES128) Init() {
var key [16]byte //aes-128
io.ReadFull(rand.Reader, key[:])
io.ReadFull(rand.Reader, n.seed[:])
block, _ := aes.NewCipher(key[:])
n.block = block
}
func (n *nonceAES128) Fill(nonce []byte) {
if n.seed[0] == 0 { // entropy update
io.ReadFull(rand.Reader, n.seed[:])
}
n.block.Encrypt(n.seed[:], n.seed[:])
copy(nonce, n.seed[:])
}

View File

@@ -4,40 +4,41 @@ import (
"encoding/binary" "encoding/binary"
"sync/atomic" "sync/atomic"
"github.com/templexxx/reedsolomon" "github.com/klauspost/reedsolomon"
) )
const ( const (
fecHeaderSize = 6 fecHeaderSize = 6
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
typeData = 0xf1 typeData = 0xf1
typeFEC = 0xf2 typeParity = 0xf2
) )
type ( // fecPacket is a decoded FEC packet
// fecPacket is a decoded FEC packet type fecPacket []byte
fecPacket struct {
seqid uint32
flag uint16
data []byte
}
// fecDecoder for decoding incoming packets func (bts fecPacket) seqid() uint32 { return binary.LittleEndian.Uint32(bts) }
fecDecoder struct { func (bts fecPacket) flag() uint16 { return binary.LittleEndian.Uint16(bts[4:]) }
rxlimit int // queue size limit func (bts fecPacket) data() []byte { return bts[6:] }
dataShards int
parityShards int
shardSize int
rx []fecPacket // ordered receive queue
// caches // fecDecoder for decoding incoming packets
decodeCache [][]byte type fecDecoder struct {
flagCache []bool rxlimit int // queue size limit
dataShards int
parityShards int
shardSize int
rx []fecPacket // ordered receive queue
// RS decoder // caches
codec reedsolomon.Encoder decodeCache [][]byte
} flagCache []bool
)
// zeros
zeros []byte
// RS decoder
codec reedsolomon.Encoder
}
func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder { func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
if dataShards <= 0 || parityShards <= 0 { if dataShards <= 0 || parityShards <= 0 {
@@ -47,48 +48,40 @@ func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
return nil return nil
} }
fec := new(fecDecoder) dec := new(fecDecoder)
fec.rxlimit = rxlimit dec.rxlimit = rxlimit
fec.dataShards = dataShards dec.dataShards = dataShards
fec.parityShards = parityShards dec.parityShards = parityShards
fec.shardSize = dataShards + parityShards dec.shardSize = dataShards + parityShards
enc, err := reedsolomon.New(dataShards, parityShards) codec, err := reedsolomon.New(dataShards, parityShards)
if err != nil { if err != nil {
return nil return nil
} }
fec.codec = enc dec.codec = codec
fec.decodeCache = make([][]byte, fec.shardSize) dec.decodeCache = make([][]byte, dec.shardSize)
fec.flagCache = make([]bool, fec.shardSize) dec.flagCache = make([]bool, dec.shardSize)
return fec dec.zeros = make([]byte, mtuLimit)
} return dec
// decodeBytes a fec packet
func (dec *fecDecoder) decodeBytes(data []byte) fecPacket {
var pkt fecPacket
pkt.seqid = binary.LittleEndian.Uint32(data)
pkt.flag = binary.LittleEndian.Uint16(data[4:])
// allocate memory & copy
buf := xmitBuf.Get().([]byte)[:len(data)-6]
copy(buf, data[6:])
pkt.data = buf
return pkt
} }
// decode a fec packet // decode a fec packet
func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) { func (dec *fecDecoder) decode(in fecPacket) (recovered [][]byte) {
// insertion // insertion
n := len(dec.rx) - 1 n := len(dec.rx) - 1
insertIdx := 0 insertIdx := 0
for i := n; i >= 0; i-- { for i := n; i >= 0; i-- {
if pkt.seqid == dec.rx[i].seqid { // de-duplicate if in.seqid() == dec.rx[i].seqid() { // de-duplicate
xmitBuf.Put(pkt.data)
return nil return nil
} else if _itimediff(pkt.seqid, dec.rx[i].seqid) > 0 { // insertion } else if _itimediff(in.seqid(), dec.rx[i].seqid()) > 0 { // insertion
insertIdx = i + 1 insertIdx = i + 1
break break
} }
} }
// make a copy
pkt := fecPacket(xmitBuf.Get().([]byte)[:len(in)])
copy(pkt, in)
// insert into ordered rx queue // insert into ordered rx queue
if insertIdx == n+1 { if insertIdx == n+1 {
dec.rx = append(dec.rx, pkt) dec.rx = append(dec.rx, pkt)
@@ -99,11 +92,11 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
} }
// shard range for current packet // shard range for current packet
shardBegin := pkt.seqid - pkt.seqid%uint32(dec.shardSize) shardBegin := pkt.seqid() - pkt.seqid()%uint32(dec.shardSize)
shardEnd := shardBegin + uint32(dec.shardSize) - 1 shardEnd := shardBegin + uint32(dec.shardSize) - 1
// max search range in ordered queue for current shard // max search range in ordered queue for current shard
searchBegin := insertIdx - int(pkt.seqid%uint32(dec.shardSize)) searchBegin := insertIdx - int(pkt.seqid()%uint32(dec.shardSize))
if searchBegin < 0 { if searchBegin < 0 {
searchBegin = 0 searchBegin = 0
} }
@@ -116,7 +109,7 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
if searchEnd-searchBegin+1 >= dec.dataShards { if searchEnd-searchBegin+1 >= dec.dataShards {
var numshard, numDataShard, first, maxlen int var numshard, numDataShard, first, maxlen int
// zero cache // zero caches
shards := dec.decodeCache shards := dec.decodeCache
shardsflag := dec.flagCache shardsflag := dec.flagCache
for k := range dec.decodeCache { for k := range dec.decodeCache {
@@ -126,40 +119,43 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
// shard assembly // shard assembly
for i := searchBegin; i <= searchEnd; i++ { for i := searchBegin; i <= searchEnd; i++ {
seqid := dec.rx[i].seqid seqid := dec.rx[i].seqid()
if _itimediff(seqid, shardEnd) > 0 { if _itimediff(seqid, shardEnd) > 0 {
break break
} else if _itimediff(seqid, shardBegin) >= 0 { } else if _itimediff(seqid, shardBegin) >= 0 {
shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data()
shardsflag[seqid%uint32(dec.shardSize)] = true shardsflag[seqid%uint32(dec.shardSize)] = true
numshard++ numshard++
if dec.rx[i].flag == typeData { if dec.rx[i].flag() == typeData {
numDataShard++ numDataShard++
} }
if numshard == 1 { if numshard == 1 {
first = i first = i
} }
if len(dec.rx[i].data) > maxlen { if len(dec.rx[i].data()) > maxlen {
maxlen = len(dec.rx[i].data) maxlen = len(dec.rx[i].data())
} }
} }
} }
if numDataShard == dec.dataShards { if numDataShard == dec.dataShards {
// case 1: no lost data shards // case 1: no loss on data shards
dec.rx = dec.freeRange(first, numshard, dec.rx) dec.rx = dec.freeRange(first, numshard, dec.rx)
} else if numshard >= dec.dataShards { } else if numshard >= dec.dataShards {
// case 2: data shard lost, but recoverable from parity shard // case 2: loss on data shards, but it's recoverable from parity shards
for k := range shards { for k := range shards {
if shards[k] != nil { if shards[k] != nil {
dlen := len(shards[k]) dlen := len(shards[k])
shards[k] = shards[k][:maxlen] shards[k] = shards[k][:maxlen]
xorBytes(shards[k][dlen:], shards[k][dlen:], shards[k][dlen:]) copy(shards[k][dlen:], dec.zeros)
} else {
shards[k] = xmitBuf.Get().([]byte)[:0]
} }
} }
if err := dec.codec.ReconstructData(shards); err == nil { if err := dec.codec.ReconstructData(shards); err == nil {
for k := range shards[:dec.dataShards] { for k := range shards[:dec.dataShards] {
if !shardsflag[k] { if !shardsflag[k] {
// recovered data should be recycled
recovered = append(recovered, shards[k]) recovered = append(recovered, shards[k])
} }
} }
@@ -170,7 +166,7 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
// keep rxlimit // keep rxlimit
if len(dec.rx) > dec.rxlimit { if len(dec.rx) > dec.rxlimit {
if dec.rx[0].flag == typeData { // record unrecoverable data if dec.rx[0].flag() == typeData { // track the unrecoverable data
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1) atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
} }
dec.rx = dec.freeRange(0, 1, dec.rx) dec.rx = dec.freeRange(0, 1, dec.rx)
@@ -178,15 +174,16 @@ func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
return return
} }
// free a range of fecPacket, and zero for GC recycling // free a range of fecPacket
func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket { func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket {
for i := first; i < first+n; i++ { // free for i := first; i < first+n; i++ { // recycle buffer
xmitBuf.Put(q[i].data) xmitBuf.Put([]byte(q[i]))
}
if first == 0 && n < cap(q)/2 {
return q[n:]
} }
copy(q[first:], q[first+n:]) copy(q[first:], q[first+n:])
for i := 0; i < n; i++ { // dereference data
q[len(q)-1-i].data = nil
}
return q[:len(q)-n] return q[:len(q)-n]
} }
@@ -200,7 +197,7 @@ type (
next uint32 // next seqid next uint32 // next seqid
shardCount int // count the number of datashards collected shardCount int // count the number of datashards collected
maxSize int // record maximum data length in datashard maxSize int // track maximum data length in datashard
headerOffset int // FEC header offset headerOffset int // FEC header offset
payloadOffset int // FEC payload offset payloadOffset int // FEC payload offset
@@ -209,6 +206,9 @@ type (
shardCache [][]byte shardCache [][]byte
encodeCache [][]byte encodeCache [][]byte
// zeros
zeros []byte
// RS encoder // RS encoder
codec reedsolomon.Encoder codec reedsolomon.Encoder
} }
@@ -218,53 +218,57 @@ func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
if dataShards <= 0 || parityShards <= 0 { if dataShards <= 0 || parityShards <= 0 {
return nil return nil
} }
fec := new(fecEncoder) enc := new(fecEncoder)
fec.dataShards = dataShards enc.dataShards = dataShards
fec.parityShards = parityShards enc.parityShards = parityShards
fec.shardSize = dataShards + parityShards enc.shardSize = dataShards + parityShards
fec.paws = (0xffffffff/uint32(fec.shardSize) - 1) * uint32(fec.shardSize) enc.paws = 0xffffffff / uint32(enc.shardSize) * uint32(enc.shardSize)
fec.headerOffset = offset enc.headerOffset = offset
fec.payloadOffset = fec.headerOffset + fecHeaderSize enc.payloadOffset = enc.headerOffset + fecHeaderSize
enc, err := reedsolomon.New(dataShards, parityShards) codec, err := reedsolomon.New(dataShards, parityShards)
if err != nil { if err != nil {
return nil return nil
} }
fec.codec = enc enc.codec = codec
// caches // caches
fec.encodeCache = make([][]byte, fec.shardSize) enc.encodeCache = make([][]byte, enc.shardSize)
fec.shardCache = make([][]byte, fec.shardSize) enc.shardCache = make([][]byte, enc.shardSize)
for k := range fec.shardCache { for k := range enc.shardCache {
fec.shardCache[k] = make([]byte, mtuLimit) enc.shardCache[k] = make([]byte, mtuLimit)
} }
return fec enc.zeros = make([]byte, mtuLimit)
return enc
} }
// encode the packet, output parity shards if we have enough datashards // encodes the packet, outputs parity shards if we have collected quorum datashards
// the content of returned parityshards will change in next encode // notice: the contents of 'ps' will be re-written in successive calling
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) { func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
// The header format:
// | FEC SEQID(4B) | FEC TYPE(2B) | SIZE (2B) | PAYLOAD(SIZE-2) |
// |<-headerOffset |<-payloadOffset
enc.markData(b[enc.headerOffset:]) enc.markData(b[enc.headerOffset:])
binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:]))) binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
// copy data to fec datashards // copy data from payloadOffset to fec shard cache
sz := len(b) sz := len(b)
enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz] enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
copy(enc.shardCache[enc.shardCount], b) copy(enc.shardCache[enc.shardCount][enc.payloadOffset:], b[enc.payloadOffset:])
enc.shardCount++ enc.shardCount++
// record max datashard length // track max datashard length
if sz > enc.maxSize { if sz > enc.maxSize {
enc.maxSize = sz enc.maxSize = sz
} }
// calculate Reed-Solomon Erasure Code // Generation of Reed-Solomon Erasure Code
if enc.shardCount == enc.dataShards { if enc.shardCount == enc.dataShards {
// bzero each datashard's tail // fill '0' into the tail of each datashard
for i := 0; i < enc.dataShards; i++ { for i := 0; i < enc.dataShards; i++ {
shard := enc.shardCache[i] shard := enc.shardCache[i]
slen := len(shard) slen := len(shard)
xorBytes(shard[slen:enc.maxSize], shard[slen:enc.maxSize], shard[slen:enc.maxSize]) copy(shard[slen:enc.maxSize], enc.zeros)
} }
// construct equal-sized slice with stripped header // construct equal-sized slice with stripped header
@@ -273,16 +277,16 @@ func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize] cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
} }
// rs encode // encoding
if err := enc.codec.Encode(cache); err == nil { if err := enc.codec.Encode(cache); err == nil {
ps = enc.shardCache[enc.dataShards:] ps = enc.shardCache[enc.dataShards:]
for k := range ps { for k := range ps {
enc.markFEC(ps[k][enc.headerOffset:]) enc.markParity(ps[k][enc.headerOffset:])
ps[k] = ps[k][:enc.maxSize] ps[k] = ps[k][:enc.maxSize]
} }
} }
// reset counters to zero // counters resetting
enc.shardCount = 0 enc.shardCount = 0
enc.maxSize = 0 enc.maxSize = 0
} }
@@ -296,8 +300,9 @@ func (enc *fecEncoder) markData(data []byte) {
enc.next++ enc.next++
} }
func (enc *fecEncoder) markFEC(data []byte) { func (enc *fecEncoder) markParity(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next) binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeFEC) binary.LittleEndian.PutUint16(data[4:], typeParity)
// sequence wrap will only happen at parity shard
enc.next = (enc.next + 1) % enc.paws enc.next = (enc.next + 1) % enc.paws
} }

BIN
vendor/github.com/fatedier/kcp-go/flame.png generated vendored Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 56 KiB

View File

@@ -1,9 +1,9 @@
// Package kcp - A Fast and Reliable ARQ Protocol
package kcp package kcp
import ( import (
"encoding/binary" "encoding/binary"
"sync/atomic" "sync/atomic"
"time"
) )
const ( const (
@@ -30,6 +30,12 @@ const (
IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window IKCP_PROBE_LIMIT = 120000 // up to 120 secs to probe window
) )
// monotonic reference time point
var refTime time.Time = time.Now()
// currentMs returns current elasped monotonic milliseconds since program startup
func currentMs() uint32 { return uint32(time.Now().Sub(refTime) / time.Millisecond) }
// output_callback is a prototype which ought capture conn and call conn.Write // output_callback is a prototype which ought capture conn and call conn.Write
type output_callback func(buf []byte, size int) type output_callback func(buf []byte, size int)
@@ -104,6 +110,7 @@ type segment struct {
xmit uint32 xmit uint32
resendts uint32 resendts uint32
fastack uint32 fastack uint32
acked uint32 // mark if the seg has acked
data []byte data []byte
} }
@@ -144,8 +151,9 @@ type KCP struct {
acklist []ackItem acklist []ackItem
buffer []byte buffer []byte
output output_callback reserved int
output output_callback
} }
type ackItem struct { type ackItem struct {
@@ -153,8 +161,11 @@ type ackItem struct {
ts uint32 ts uint32
} }
// NewKCP create a new kcp control object, 'conv' must equal in two endpoint // NewKCP create a new kcp state machine
// from the same connection. //
// 'conv' must be equal in the connection peers, or else data will be silently rejected.
//
// 'output' function will be called whenever these is data to be sent on wire.
func NewKCP(conv uint32, output output_callback) *KCP { func NewKCP(conv uint32, output output_callback) *KCP {
kcp := new(KCP) kcp := new(KCP)
kcp.conv = conv kcp.conv = conv
@@ -163,7 +174,7 @@ func NewKCP(conv uint32, output output_callback) *KCP {
kcp.rmt_wnd = IKCP_WND_RCV kcp.rmt_wnd = IKCP_WND_RCV
kcp.mtu = IKCP_MTU_DEF kcp.mtu = IKCP_MTU_DEF
kcp.mss = kcp.mtu - IKCP_OVERHEAD kcp.mss = kcp.mtu - IKCP_OVERHEAD
kcp.buffer = make([]byte, (kcp.mtu+IKCP_OVERHEAD)*3) kcp.buffer = make([]byte, kcp.mtu)
kcp.rx_rto = IKCP_RTO_DEF kcp.rx_rto = IKCP_RTO_DEF
kcp.rx_minrto = IKCP_RTO_MIN kcp.rx_minrto = IKCP_RTO_MIN
kcp.interval = IKCP_INTERVAL kcp.interval = IKCP_INTERVAL
@@ -181,8 +192,24 @@ func (kcp *KCP) newSegment(size int) (seg segment) {
} }
// delSegment recycles a KCP segment // delSegment recycles a KCP segment
func (kcp *KCP) delSegment(seg segment) { func (kcp *KCP) delSegment(seg *segment) {
xmitBuf.Put(seg.data) if seg.data != nil {
xmitBuf.Put(seg.data)
seg.data = nil
}
}
// ReserveBytes keeps n bytes untouched from the beginning of the buffer,
// the output_callback function should be aware of this.
//
// Return false if n >= mss
func (kcp *KCP) ReserveBytes(n int) bool {
if n >= int(kcp.mtu-IKCP_OVERHEAD) || n < 0 {
return false
}
kcp.reserved = n
kcp.mss = kcp.mtu - IKCP_OVERHEAD - uint32(n)
return true
} }
// PeekSize checks the size of next message in the recv queue // PeekSize checks the size of next message in the recv queue
@@ -210,19 +237,21 @@ func (kcp *KCP) PeekSize() (length int) {
return return
} }
// Recv is user/upper level recv: returns size, returns below zero for EAGAIN // Receive data from kcp state machine
//
// Return number of bytes read.
//
// Return -1 when there is no readable data.
//
// Return -2 if len(buffer) is smaller than kcp.PeekSize().
func (kcp *KCP) Recv(buffer []byte) (n int) { func (kcp *KCP) Recv(buffer []byte) (n int) {
if len(kcp.rcv_queue) == 0 { peeksize := kcp.PeekSize()
if peeksize < 0 {
return -1 return -1
} }
peeksize := kcp.PeekSize()
if peeksize < 0 {
return -2
}
if peeksize > len(buffer) { if peeksize > len(buffer) {
return -3 return -2
} }
var fast_recover bool var fast_recover bool
@@ -238,7 +267,7 @@ func (kcp *KCP) Recv(buffer []byte) (n int) {
buffer = buffer[len(seg.data):] buffer = buffer[len(seg.data):]
n += len(seg.data) n += len(seg.data)
count++ count++
kcp.delSegment(*seg) kcp.delSegment(seg)
if seg.frg == 0 { if seg.frg == 0 {
break break
} }
@@ -251,7 +280,7 @@ func (kcp *KCP) Recv(buffer []byte) (n int) {
count = 0 count = 0
for k := range kcp.rcv_buf { for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k] seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) { if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue)+count < int(kcp.rcv_wnd) {
kcp.rcv_nxt++ kcp.rcv_nxt++
count++ count++
} else { } else {
@@ -382,10 +411,12 @@ func (kcp *KCP) parse_ack(sn uint32) {
for k := range kcp.snd_buf { for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k] seg := &kcp.snd_buf[k]
if sn == seg.sn { if sn == seg.sn {
kcp.delSegment(*seg) // mark and free space, but leave the segment here,
copy(kcp.snd_buf[k:], kcp.snd_buf[k+1:]) // and wait until `una` to delete this, then we don't
kcp.snd_buf[len(kcp.snd_buf)-1] = segment{} // have to shift the segments behind forward,
kcp.snd_buf = kcp.snd_buf[:len(kcp.snd_buf)-1] // which is an expensive operation for large window
seg.acked = 1
kcp.delSegment(seg)
break break
} }
if _itimediff(sn, seg.sn) < 0 { if _itimediff(sn, seg.sn) < 0 {
@@ -394,7 +425,7 @@ func (kcp *KCP) parse_ack(sn uint32) {
} }
} }
func (kcp *KCP) parse_fastack(sn uint32) { func (kcp *KCP) parse_fastack(sn, ts uint32) {
if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 { if _itimediff(sn, kcp.snd_una) < 0 || _itimediff(sn, kcp.snd_nxt) >= 0 {
return return
} }
@@ -403,7 +434,7 @@ func (kcp *KCP) parse_fastack(sn uint32) {
seg := &kcp.snd_buf[k] seg := &kcp.snd_buf[k]
if _itimediff(sn, seg.sn) < 0 { if _itimediff(sn, seg.sn) < 0 {
break break
} else if sn != seg.sn { } else if sn != seg.sn && _itimediff(seg.ts, ts) <= 0 {
seg.fastack++ seg.fastack++
} }
} }
@@ -414,7 +445,7 @@ func (kcp *KCP) parse_una(una uint32) {
for k := range kcp.snd_buf { for k := range kcp.snd_buf {
seg := &kcp.snd_buf[k] seg := &kcp.snd_buf[k]
if _itimediff(una, seg.sn) > 0 { if _itimediff(una, seg.sn) > 0 {
kcp.delSegment(*seg) kcp.delSegment(seg)
count++ count++
} else { } else {
break break
@@ -430,12 +461,12 @@ func (kcp *KCP) ack_push(sn, ts uint32) {
kcp.acklist = append(kcp.acklist, ackItem{sn, ts}) kcp.acklist = append(kcp.acklist, ackItem{sn, ts})
} }
func (kcp *KCP) parse_data(newseg segment) { // returns true if data has repeated
func (kcp *KCP) parse_data(newseg segment) bool {
sn := newseg.sn sn := newseg.sn
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 || if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) >= 0 ||
_itimediff(sn, kcp.rcv_nxt) < 0 { _itimediff(sn, kcp.rcv_nxt) < 0 {
kcp.delSegment(newseg) return true
return
} }
n := len(kcp.rcv_buf) - 1 n := len(kcp.rcv_buf) - 1
@@ -445,7 +476,6 @@ func (kcp *KCP) parse_data(newseg segment) {
seg := &kcp.rcv_buf[i] seg := &kcp.rcv_buf[i]
if seg.sn == sn { if seg.sn == sn {
repeat = true repeat = true
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
break break
} }
if _itimediff(sn, seg.sn) > 0 { if _itimediff(sn, seg.sn) > 0 {
@@ -455,6 +485,11 @@ func (kcp *KCP) parse_data(newseg segment) {
} }
if !repeat { if !repeat {
// replicate the content if it's new
dataCopy := xmitBuf.Get().([]byte)[:len(newseg.data)]
copy(dataCopy, newseg.data)
newseg.data = dataCopy
if insert_idx == n+1 { if insert_idx == n+1 {
kcp.rcv_buf = append(kcp.rcv_buf, newseg) kcp.rcv_buf = append(kcp.rcv_buf, newseg)
} else { } else {
@@ -462,15 +497,13 @@ func (kcp *KCP) parse_data(newseg segment) {
copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:]) copy(kcp.rcv_buf[insert_idx+1:], kcp.rcv_buf[insert_idx:])
kcp.rcv_buf[insert_idx] = newseg kcp.rcv_buf[insert_idx] = newseg
} }
} else {
kcp.delSegment(newseg)
} }
// move available data from rcv_buf -> rcv_queue // move available data from rcv_buf -> rcv_queue
count := 0 count := 0
for k := range kcp.rcv_buf { for k := range kcp.rcv_buf {
seg := &kcp.rcv_buf[k] seg := &kcp.rcv_buf[k]
if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue) < int(kcp.rcv_wnd) { if seg.sn == kcp.rcv_nxt && len(kcp.rcv_queue)+count < int(kcp.rcv_wnd) {
kcp.rcv_nxt++ kcp.rcv_nxt++
count++ count++
} else { } else {
@@ -481,18 +514,23 @@ func (kcp *KCP) parse_data(newseg segment) {
kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...) kcp.rcv_queue = append(kcp.rcv_queue, kcp.rcv_buf[:count]...)
kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count) kcp.rcv_buf = kcp.remove_front(kcp.rcv_buf, count)
} }
return repeat
} }
// Input when you received a low level packet (eg. UDP packet), call it // Input a packet into kcp state machine.
// regular indicates a regular packet has received(not from FEC) //
// 'regular' indicates it's a real data packet from remote, and it means it's not generated from ReedSolomon
// codecs.
//
// 'ackNoDelay' will trigger immediate ACK, but surely it will not be efficient in bandwidth
func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int { func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
una := kcp.snd_una snd_una := kcp.snd_una
if len(data) < IKCP_OVERHEAD { if len(data) < IKCP_OVERHEAD {
return -1 return -1
} }
var maxack uint32 var latest uint32 // the latest ack packet
var lastackts uint32
var flag int var flag int
var inSegs uint64 var inSegs uint64
@@ -535,19 +573,15 @@ func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
if cmd == IKCP_CMD_ACK { if cmd == IKCP_CMD_ACK {
kcp.parse_ack(sn) kcp.parse_ack(sn)
kcp.shrink_buf() kcp.parse_fastack(sn, ts)
if flag == 0 { flag |= 1
flag = 1 latest = ts
maxack = sn
} else if _itimediff(sn, maxack) > 0 {
maxack = sn
}
lastackts = ts
} else if cmd == IKCP_CMD_PUSH { } else if cmd == IKCP_CMD_PUSH {
repeat := true
if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 { if _itimediff(sn, kcp.rcv_nxt+kcp.rcv_wnd) < 0 {
kcp.ack_push(sn, ts) kcp.ack_push(sn, ts)
if _itimediff(sn, kcp.rcv_nxt) >= 0 { if _itimediff(sn, kcp.rcv_nxt) >= 0 {
seg := kcp.newSegment(int(length)) var seg segment
seg.conv = conv seg.conv = conv
seg.cmd = cmd seg.cmd = cmd
seg.frg = frg seg.frg = frg
@@ -555,12 +589,11 @@ func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
seg.ts = ts seg.ts = ts
seg.sn = sn seg.sn = sn
seg.una = una seg.una = una
copy(seg.data, data[:length]) seg.data = data[:length] // delayed data copying
kcp.parse_data(seg) repeat = kcp.parse_data(seg)
} else {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
} }
} else { }
if regular && repeat {
atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1) atomic.AddUint64(&DefaultSnmp.RepeatSegs, 1)
} }
} else if cmd == IKCP_CMD_WASK { } else if cmd == IKCP_CMD_WASK {
@@ -578,40 +611,42 @@ func (kcp *KCP) Input(data []byte, regular, ackNoDelay bool) int {
} }
atomic.AddUint64(&DefaultSnmp.InSegs, inSegs) atomic.AddUint64(&DefaultSnmp.InSegs, inSegs)
// update rtt with the latest ts
// ignore the FEC packet
if flag != 0 && regular { if flag != 0 && regular {
kcp.parse_fastack(maxack)
current := currentMs() current := currentMs()
if _itimediff(current, lastackts) >= 0 { if _itimediff(current, latest) >= 0 {
kcp.update_ack(_itimediff(current, lastackts)) kcp.update_ack(_itimediff(current, latest))
} }
} }
if _itimediff(kcp.snd_una, una) > 0 { // cwnd update when packet arrived
if kcp.cwnd < kcp.rmt_wnd { if kcp.nocwnd == 0 {
mss := kcp.mss if _itimediff(kcp.snd_una, snd_una) > 0 {
if kcp.cwnd < kcp.ssthresh { if kcp.cwnd < kcp.rmt_wnd {
kcp.cwnd++ mss := kcp.mss
kcp.incr += mss if kcp.cwnd < kcp.ssthresh {
} else {
if kcp.incr < mss {
kcp.incr = mss
}
kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
if (kcp.cwnd+1)*mss <= kcp.incr {
kcp.cwnd++ kcp.cwnd++
kcp.incr += mss
} else {
if kcp.incr < mss {
kcp.incr = mss
}
kcp.incr += (mss*mss)/kcp.incr + (mss / 16)
if (kcp.cwnd+1)*mss <= kcp.incr {
kcp.cwnd++
}
}
if kcp.cwnd > kcp.rmt_wnd {
kcp.cwnd = kcp.rmt_wnd
kcp.incr = kcp.rmt_wnd * mss
} }
}
if kcp.cwnd > kcp.rmt_wnd {
kcp.cwnd = kcp.rmt_wnd
kcp.incr = kcp.rmt_wnd * mss
} }
} }
} }
if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately if ackNoDelay && len(kcp.acklist) > 0 { // ack immediately
kcp.flush(true) kcp.flush(true)
} else if kcp.rmt_wnd == 0 && len(kcp.acklist) > 0 { // window zero
kcp.flush(true)
} }
return 0 return 0
} }
@@ -624,7 +659,7 @@ func (kcp *KCP) wnd_unused() uint16 {
} }
// flush pending data // flush pending data
func (kcp *KCP) flush(ackOnly bool) { func (kcp *KCP) flush(ackOnly bool) uint32 {
var seg segment var seg segment
seg.conv = kcp.conv seg.conv = kcp.conv
seg.cmd = IKCP_CMD_ACK seg.cmd = IKCP_CMD_ACK
@@ -632,14 +667,28 @@ func (kcp *KCP) flush(ackOnly bool) {
seg.una = kcp.rcv_nxt seg.una = kcp.rcv_nxt
buffer := kcp.buffer buffer := kcp.buffer
// flush acknowledges ptr := buffer[kcp.reserved:] // keep n bytes untouched
ptr := buffer
for i, ack := range kcp.acklist { // makeSpace makes room for writing
makeSpace := func(space int) {
size := len(buffer) - len(ptr) size := len(buffer) - len(ptr)
if size+IKCP_OVERHEAD > int(kcp.mtu) { if size+space > int(kcp.mtu) {
kcp.output(buffer, size) kcp.output(buffer, size)
ptr = buffer ptr = buffer[kcp.reserved:]
} }
}
// flush bytes in buffer if there is any
flushBuffer := func() {
size := len(buffer) - len(ptr)
if size > kcp.reserved {
kcp.output(buffer, size)
}
}
// flush acknowledges
for i, ack := range kcp.acklist {
makeSpace(IKCP_OVERHEAD)
// filter jitters caused by bufferbloat // filter jitters caused by bufferbloat
if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i { if ack.sn >= kcp.rcv_nxt || len(kcp.acklist)-1 == i {
seg.sn, seg.ts = ack.sn, ack.ts seg.sn, seg.ts = ack.sn, ack.ts
@@ -649,11 +698,8 @@ func (kcp *KCP) flush(ackOnly bool) {
kcp.acklist = kcp.acklist[0:0] kcp.acklist = kcp.acklist[0:0]
if ackOnly { // flash remain ack segments if ackOnly { // flash remain ack segments
size := len(buffer) - len(ptr) flushBuffer()
if size > 0 { return kcp.interval
kcp.output(buffer, size)
}
return
} }
// probe window size (if remote window size equals zero) // probe window size (if remote window size equals zero)
@@ -683,22 +729,14 @@ func (kcp *KCP) flush(ackOnly bool) {
// flush window probing commands // flush window probing commands
if (kcp.probe & IKCP_ASK_SEND) != 0 { if (kcp.probe & IKCP_ASK_SEND) != 0 {
seg.cmd = IKCP_CMD_WASK seg.cmd = IKCP_CMD_WASK
size := len(buffer) - len(ptr) makeSpace(IKCP_OVERHEAD)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr) ptr = seg.encode(ptr)
} }
// flush window probing commands // flush window probing commands
if (kcp.probe & IKCP_ASK_TELL) != 0 { if (kcp.probe & IKCP_ASK_TELL) != 0 {
seg.cmd = IKCP_CMD_WINS seg.cmd = IKCP_CMD_WINS
size := len(buffer) - len(ptr) makeSpace(IKCP_OVERHEAD)
if size+IKCP_OVERHEAD > int(kcp.mtu) {
kcp.output(buffer, size)
ptr = buffer
}
ptr = seg.encode(ptr) ptr = seg.encode(ptr)
} }
@@ -723,7 +761,6 @@ func (kcp *KCP) flush(ackOnly bool) {
kcp.snd_buf = append(kcp.snd_buf, newseg) kcp.snd_buf = append(kcp.snd_buf, newseg)
kcp.snd_nxt++ kcp.snd_nxt++
newSegsCount++ newSegsCount++
kcp.snd_queue[k].data = nil
} }
if newSegsCount > 0 { if newSegsCount > 0 {
kcp.snd_queue = kcp.remove_front(kcp.snd_queue, newSegsCount) kcp.snd_queue = kcp.remove_front(kcp.snd_queue, newSegsCount)
@@ -738,9 +775,15 @@ func (kcp *KCP) flush(ackOnly bool) {
// check for retransmissions // check for retransmissions
current := currentMs() current := currentMs()
var change, lost, lostSegs, fastRetransSegs, earlyRetransSegs uint64 var change, lost, lostSegs, fastRetransSegs, earlyRetransSegs uint64
for k := range kcp.snd_buf { minrto := int32(kcp.interval)
segment := &kcp.snd_buf[k]
ref := kcp.snd_buf[:len(kcp.snd_buf)] // for bounds check elimination
for k := range ref {
segment := &ref[k]
needsend := false needsend := false
if segment.acked == 1 {
continue
}
if segment.xmit == 0 { // initial transmit if segment.xmit == 0 { // initial transmit
needsend = true needsend = true
segment.rto = kcp.rx_rto segment.rto = kcp.rx_rto
@@ -772,20 +815,14 @@ func (kcp *KCP) flush(ackOnly bool) {
} }
if needsend { if needsend {
current = currentMs()
segment.xmit++ segment.xmit++
segment.ts = current segment.ts = current
segment.wnd = seg.wnd segment.wnd = seg.wnd
segment.una = seg.una segment.una = seg.una
size := len(buffer) - len(ptr)
need := IKCP_OVERHEAD + len(segment.data) need := IKCP_OVERHEAD + len(segment.data)
makeSpace(need)
if size+need > int(kcp.mtu) {
kcp.output(buffer, size)
current = currentMs() // time update for a blocking call
ptr = buffer
}
ptr = segment.encode(ptr) ptr = segment.encode(ptr)
copy(ptr, segment.data) copy(ptr, segment.data)
ptr = ptr[len(segment.data):] ptr = ptr[len(segment.data):]
@@ -794,13 +831,15 @@ func (kcp *KCP) flush(ackOnly bool) {
kcp.state = 0xFFFFFFFF kcp.state = 0xFFFFFFFF
} }
} }
// get the nearest rto
if rto := _itimediff(segment.resendts, current); rto > 0 && rto < minrto {
minrto = rto
}
} }
// flash remain segments // flash remain segments
size := len(buffer) - len(ptr) flushBuffer()
if size > 0 {
kcp.output(buffer, size)
}
// counter updates // counter updates
sum := lostSegs sum := lostSegs
@@ -819,34 +858,41 @@ func (kcp *KCP) flush(ackOnly bool) {
atomic.AddUint64(&DefaultSnmp.RetransSegs, sum) atomic.AddUint64(&DefaultSnmp.RetransSegs, sum)
} }
// update ssthresh // cwnd update
// rate halving, https://tools.ietf.org/html/rfc6937 if kcp.nocwnd == 0 {
if change > 0 { // update ssthresh
inflight := kcp.snd_nxt - kcp.snd_una // rate halving, https://tools.ietf.org/html/rfc6937
kcp.ssthresh = inflight / 2 if change > 0 {
if kcp.ssthresh < IKCP_THRESH_MIN { inflight := kcp.snd_nxt - kcp.snd_una
kcp.ssthresh = IKCP_THRESH_MIN kcp.ssthresh = inflight / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = kcp.ssthresh + resent
kcp.incr = kcp.cwnd * kcp.mss
}
// congestion control, https://tools.ietf.org/html/rfc5681
if lost > 0 {
kcp.ssthresh = cwnd / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = 1
kcp.incr = kcp.mss
}
if kcp.cwnd < 1 {
kcp.cwnd = 1
kcp.incr = kcp.mss
} }
kcp.cwnd = kcp.ssthresh + resent
kcp.incr = kcp.cwnd * kcp.mss
} }
// congestion control, https://tools.ietf.org/html/rfc5681 return uint32(minrto)
if lost > 0 {
kcp.ssthresh = cwnd / 2
if kcp.ssthresh < IKCP_THRESH_MIN {
kcp.ssthresh = IKCP_THRESH_MIN
}
kcp.cwnd = 1
kcp.incr = kcp.mss
}
if kcp.cwnd < 1 {
kcp.cwnd = 1
kcp.incr = kcp.mss
}
} }
// (deprecated)
//
// Update updates state (call it repeatedly, every 10ms-100ms), or you can ask // Update updates state (call it repeatedly, every 10ms-100ms), or you can ask
// ikcp_check when to call it again (without ikcp_input/_send calling). // ikcp_check when to call it again (without ikcp_input/_send calling).
// 'current' - current timestamp in millisec. // 'current' - current timestamp in millisec.
@@ -875,6 +921,8 @@ func (kcp *KCP) Update() {
} }
} }
// (deprecated)
//
// Check determines when should you invoke ikcp_update: // Check determines when should you invoke ikcp_update:
// returns when you should invoke ikcp_update in millisec, if there // returns when you should invoke ikcp_update in millisec, if there
// is no ikcp_input/_send calling. you can call ikcp_update in that // is no ikcp_input/_send calling. you can call ikcp_update in that
@@ -930,12 +978,16 @@ func (kcp *KCP) SetMtu(mtu int) int {
if mtu < 50 || mtu < IKCP_OVERHEAD { if mtu < 50 || mtu < IKCP_OVERHEAD {
return -1 return -1
} }
buffer := make([]byte, (mtu+IKCP_OVERHEAD)*3) if kcp.reserved >= int(kcp.mtu-IKCP_OVERHEAD) || kcp.reserved < 0 {
return -1
}
buffer := make([]byte, mtu)
if buffer == nil { if buffer == nil {
return -2 return -2
} }
kcp.mtu = uint32(mtu) kcp.mtu = uint32(mtu)
kcp.mss = kcp.mtu - IKCP_OVERHEAD kcp.mss = kcp.mtu - IKCP_OVERHEAD - uint32(kcp.reserved)
kcp.buffer = buffer kcp.buffer = buffer
return 0 return 0
} }
@@ -989,10 +1041,13 @@ func (kcp *KCP) WaitSnd() int {
} }
// remove front n elements from queue // remove front n elements from queue
// if the number of elements to remove is more than half of the size.
// just shift the rear elements to front, otherwise just reslice q to q[n:]
// then the cost of runtime.growslice can always be less than n/2
func (kcp *KCP) remove_front(q []segment, n int) []segment { func (kcp *KCP) remove_front(q []segment, n int) []segment {
newn := copy(q, q[n:]) if n > cap(q)/2 {
for i := newn; i < len(q); i++ { newn := copy(q, q[n:])
q[i] = segment{} // manual set nil for GC return q[:newn]
} }
return q[:newn] return q[n:]
} }

48
vendor/github.com/fatedier/kcp-go/readloop.go generated vendored Normal file
View File

@@ -0,0 +1,48 @@
package kcp
import (
"sync/atomic"
"github.com/pkg/errors"
)
func (s *UDPSession) defaultReadLoop() {
buf := make([]byte, mtuLimit)
var src string
for {
if n, addr, err := s.conn.ReadFrom(buf); err == nil {
// make sure the packet is from the same source
if src == "" { // set source address
src = addr.String()
} else if addr.String() != src {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
continue
}
if n >= s.headerSize+IKCP_OVERHEAD {
s.packetInput(buf[:n])
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
} else {
s.notifyReadError(errors.WithStack(err))
return
}
}
}
func (l *Listener) defaultMonitor() {
buf := make([]byte, mtuLimit)
for {
if n, from, err := l.conn.ReadFrom(buf); err == nil {
if n >= l.headerSize+IKCP_OVERHEAD {
l.packetInput(buf[:n], from)
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
} else {
l.notifyReadError(errors.WithStack(err))
return
}
}
}

11
vendor/github.com/fatedier/kcp-go/readloop_generic.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// +build !linux
package kcp
func (s *UDPSession) readLoop() {
s.defaultReadLoop()
}
func (l *Listener) monitor() {
l.defaultMonitor()
}

120
vendor/github.com/fatedier/kcp-go/readloop_linux.go generated vendored Normal file
View File

@@ -0,0 +1,120 @@
// +build linux
package kcp
import (
"net"
"os"
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// the read loop for a client session
func (s *UDPSession) readLoop() {
// default version
if s.xconn == nil {
s.defaultReadLoop()
return
}
// x/net version
var src string
msgs := make([]ipv4.Message, batchSize)
for k := range msgs {
msgs[k].Buffers = [][]byte{make([]byte, mtuLimit)}
}
for {
if count, err := s.xconn.ReadBatch(msgs, 0); err == nil {
for i := 0; i < count; i++ {
msg := &msgs[i]
// make sure the packet is from the same source
if src == "" { // set source address if nil
src = msg.Addr.String()
} else if msg.Addr.String() != src {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
continue
}
if msg.N < s.headerSize+IKCP_OVERHEAD {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
continue
}
// source and size has validated
s.packetInput(msg.Buffers[0][:msg.N])
}
} else {
// compatibility issue:
// for linux kernel<=2.6.32, support for sendmmsg is not available
// an error of type os.SyscallError will be returned
if operr, ok := err.(*net.OpError); ok {
if se, ok := operr.Err.(*os.SyscallError); ok {
if se.Syscall == "recvmmsg" {
s.defaultReadLoop()
return
}
}
}
s.notifyReadError(errors.WithStack(err))
return
}
}
}
// monitor incoming data for all connections of server
func (l *Listener) monitor() {
var xconn batchConn
if _, ok := l.conn.(*net.UDPConn); ok {
addr, err := net.ResolveUDPAddr("udp", l.conn.LocalAddr().String())
if err == nil {
if addr.IP.To4() != nil {
xconn = ipv4.NewPacketConn(l.conn)
} else {
xconn = ipv6.NewPacketConn(l.conn)
}
}
}
// default version
if xconn == nil {
l.defaultMonitor()
return
}
// x/net version
msgs := make([]ipv4.Message, batchSize)
for k := range msgs {
msgs[k].Buffers = [][]byte{make([]byte, mtuLimit)}
}
for {
if count, err := xconn.ReadBatch(msgs, 0); err == nil {
for i := 0; i < count; i++ {
msg := &msgs[i]
if msg.N >= l.headerSize+IKCP_OVERHEAD {
l.packetInput(msg.Buffers[0][:msg.N], msg.Addr)
} else {
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
}
}
} else {
// compatibility issue:
// for linux kernel<=2.6.32, support for sendmmsg is not available
// an error of type os.SyscallError will be returned
if operr, ok := err.(*net.OpError); ok {
if se, ok := operr.Err.(*os.SyscallError); ok {
if se.Syscall == "recvmmsg" {
l.defaultMonitor()
return
}
}
}
l.notifyReadError(errors.WithStack(err))
return
}
}
}

File diff suppressed because it is too large Load Diff

25
vendor/github.com/fatedier/kcp-go/tx.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
package kcp
import (
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
func (s *UDPSession) defaultTx(txqueue []ipv4.Message) {
nbytes := 0
npkts := 0
for k := range txqueue {
if n, err := s.conn.WriteTo(txqueue[k].Buffers[0], txqueue[k].Addr); err == nil {
nbytes += n
npkts++
xmitBuf.Put(txqueue[k].Buffers[0])
} else {
s.notifyWriteError(errors.WithStack(err))
break
}
}
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}

11
vendor/github.com/fatedier/kcp-go/tx_generic.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// +build !linux
package kcp
import (
"golang.org/x/net/ipv4"
)
func (s *UDPSession) tx(txqueue []ipv4.Message) {
s.defaultTx(txqueue)
}

52
vendor/github.com/fatedier/kcp-go/tx_linux.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
// +build linux
package kcp
import (
"net"
"os"
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
func (s *UDPSession) tx(txqueue []ipv4.Message) {
// default version
if s.xconn == nil || s.xconnWriteError != nil {
s.defaultTx(txqueue)
return
}
// x/net version
nbytes := 0
npkts := 0
for len(txqueue) > 0 {
if n, err := s.xconn.WriteBatch(txqueue, 0); err == nil {
for k := range txqueue[:n] {
nbytes += len(txqueue[k].Buffers[0])
xmitBuf.Put(txqueue[k].Buffers[0])
}
npkts += n
txqueue = txqueue[n:]
} else {
// compatibility issue:
// for linux kernel<=2.6.32, support for sendmmsg is not available
// an error of type os.SyscallError will be returned
if operr, ok := err.(*net.OpError); ok {
if se, ok := operr.Err.(*os.SyscallError); ok {
if se.Syscall == "sendmmsg" {
s.xconnWriteError = se
s.defaultTx(txqueue)
return
}
}
}
s.notifyWriteError(errors.WithStack(err))
break
}
}
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}

View File

@@ -76,29 +76,28 @@ func (h *updateHeap) wakeup() {
} }
func (h *updateHeap) updateTask() { func (h *updateHeap) updateTask() {
var timer <-chan time.Time timer := time.NewTimer(0)
for { for {
select { select {
case <-timer: case <-timer.C:
case <-h.chWakeUp: case <-h.chWakeUp:
} }
h.mu.Lock() h.mu.Lock()
hlen := h.Len() hlen := h.Len()
now := time.Now()
for i := 0; i < hlen; i++ { for i := 0; i < hlen; i++ {
entry := heap.Pop(h).(entry) entry := &h.entries[0]
if now.After(entry.ts) { if !time.Now().Before(entry.ts) {
entry.ts = now.Add(entry.s.update()) interval := entry.s.update()
heap.Push(h, entry) entry.ts = time.Now().Add(interval)
heap.Fix(h, 0)
} else { } else {
heap.Push(h, entry)
break break
} }
} }
if hlen > 0 { if hlen > 0 {
timer = time.After(h.entries[0].ts.Sub(now)) timer.Reset(h.entries[0].ts.Sub(time.Now()))
} }
h.mu.Unlock() h.mu.Unlock()
} }

View File

@@ -1,110 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package kcp
import (
"runtime"
"unsafe"
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
w := n / wordSize
if w > 0 {
wordBytes := w * wordSize
fastXORWords(dst[:wordBytes], a[:wordBytes], b[:wordBytes])
}
for i := (n - n%wordSize); i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}
func safeXORBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
ex := n % 8
for i := 0; i < ex; i++ {
dst[i] = a[i] ^ b[i]
}
for i := ex; i < n; i += 8 {
_dst := dst[i : i+8]
_a := a[i : i+8]
_b := b[i : i+8]
_dst[0] = _a[0] ^ _b[0]
_dst[1] = _a[1] ^ _b[1]
_dst[2] = _a[2] ^ _b[2]
_dst[3] = _a[3] ^ _b[3]
_dst[4] = _a[4] ^ _b[4]
_dst[5] = _a[5] ^ _b[5]
_dst[6] = _a[6] ^ _b[6]
_dst[7] = _a[7] ^ _b[7]
}
return n
}
// xorBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func xorBytes(dst, a, b []byte) int {
if supportsUnaligned {
return fastXORBytes(dst, a, b)
}
// TODO(hanwen): if (dst, a, b) have common alignment
// we could still try fastXORBytes. It is not clear
// how often this happens, and it's only worth it if
// the block encryption itself is hardware
// accelerated.
return safeXORBytes(dst, a, b)
}
// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
// The arguments are assumed to be of equal length.
func fastXORWords(dst, a, b []byte) {
dw := *(*[]uintptr)(unsafe.Pointer(&dst))
aw := *(*[]uintptr)(unsafe.Pointer(&a))
bw := *(*[]uintptr)(unsafe.Pointer(&b))
n := len(b) / wordSize
ex := n % 8
for i := 0; i < ex; i++ {
dw[i] = aw[i] ^ bw[i]
}
for i := ex; i < n; i += 8 {
_dw := dw[i : i+8]
_aw := aw[i : i+8]
_bw := bw[i : i+8]
_dw[0] = _aw[0] ^ _bw[0]
_dw[1] = _aw[1] ^ _bw[1]
_dw[2] = _aw[2] ^ _bw[2]
_dw[3] = _aw[3] ^ _bw[3]
_dw[4] = _aw[4] ^ _bw[4]
_dw[5] = _aw[5] ^ _bw[5]
_dw[6] = _aw[6] ^ _bw[6]
_dw[7] = _aw[7] ^ _bw[7]
}
}
func xorWords(dst, a, b []byte) {
if supportsUnaligned {
fastXORWords(dst, a, b)
} else {
safeXORBytes(dst, a, b)
}
}

View File

@@ -1,19 +0,0 @@
language: go
sudo: false
matrix:
include:
- go: 1.3
- go: 1.4
- go: 1.5
- go: 1.6
- go: 1.7
- go: tip
allow_failures:
- go: tip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go vet $(go list ./... | grep -v /vendor/)
- go test -v -race ./...

View File

@@ -1,10 +0,0 @@
context
=======
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
gorilla/context is a general purpose registry for global request variables.
> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context

View File

@@ -1,143 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package context
import (
"net/http"
"sync"
"time"
)
var (
mutex sync.RWMutex
data = make(map[*http.Request]map[interface{}]interface{})
datat = make(map[*http.Request]int64)
)
// Set stores a value for a given key in a given request.
func Set(r *http.Request, key, val interface{}) {
mutex.Lock()
if data[r] == nil {
data[r] = make(map[interface{}]interface{})
datat[r] = time.Now().Unix()
}
data[r][key] = val
mutex.Unlock()
}
// Get returns a value stored for a given key in a given request.
func Get(r *http.Request, key interface{}) interface{} {
mutex.RLock()
if ctx := data[r]; ctx != nil {
value := ctx[key]
mutex.RUnlock()
return value
}
mutex.RUnlock()
return nil
}
// GetOk returns stored value and presence state like multi-value return of map access.
func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
mutex.RLock()
if _, ok := data[r]; ok {
value, ok := data[r][key]
mutex.RUnlock()
return value, ok
}
mutex.RUnlock()
return nil, false
}
// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
func GetAll(r *http.Request) map[interface{}]interface{} {
mutex.RLock()
if context, ok := data[r]; ok {
result := make(map[interface{}]interface{}, len(context))
for k, v := range context {
result[k] = v
}
mutex.RUnlock()
return result
}
mutex.RUnlock()
return nil
}
// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
// the request was registered.
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
mutex.RLock()
context, ok := data[r]
result := make(map[interface{}]interface{}, len(context))
for k, v := range context {
result[k] = v
}
mutex.RUnlock()
return result, ok
}
// Delete removes a value stored for a given key in a given request.
func Delete(r *http.Request, key interface{}) {
mutex.Lock()
if data[r] != nil {
delete(data[r], key)
}
mutex.Unlock()
}
// Clear removes all values stored for a given request.
//
// This is usually called by a handler wrapper to clean up request
// variables at the end of a request lifetime. See ClearHandler().
func Clear(r *http.Request) {
mutex.Lock()
clear(r)
mutex.Unlock()
}
// clear is Clear without the lock.
func clear(r *http.Request) {
delete(data, r)
delete(datat, r)
}
// Purge removes request data stored for longer than maxAge, in seconds.
// It returns the amount of requests removed.
//
// If maxAge <= 0, all request data is removed.
//
// This is only used for sanity check: in case context cleaning was not
// properly set some request data can be kept forever, consuming an increasing
// amount of memory. In case this is detected, Purge() must be called
// periodically until the problem is fixed.
func Purge(maxAge int) int {
mutex.Lock()
count := 0
if maxAge <= 0 {
count = len(data)
data = make(map[*http.Request]map[interface{}]interface{})
datat = make(map[*http.Request]int64)
} else {
min := time.Now().Unix() - int64(maxAge)
for r := range data {
if datat[r] < min {
clear(r)
count++
}
}
}
mutex.Unlock()
return count
}
// ClearHandler wraps an http.Handler and clears request values at the end
// of a request lifetime.
func ClearHandler(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer Clear(r)
h.ServeHTTP(w, r)
})
}

View File

@@ -1,88 +0,0 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package context stores values shared during a request lifetime.
Note: gorilla/context, having been born well before `context.Context` existed,
does not play well > with the shallow copying of the request that
[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
(added to net/http Go 1.7 onwards) performs. You should either use *just*
gorilla/context, or moving forward, the new `http.Request.Context()`.
For example, a router can set variables extracted from the URL and later
application handlers can access those values, or it can be used to store
sessions values to be saved at the end of a request. There are several
others common uses.
The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
Here's the basic usage: first define the keys that you will need. The key
type is interface{} so a key can be of any type that supports equality.
Here we define a key using a custom int type to avoid name collisions:
package foo
import (
"github.com/gorilla/context"
)
type key int
const MyKey key = 0
Then set a variable. Variables are bound to an http.Request object, so you
need a request instance to set a value:
context.Set(r, MyKey, "bar")
The application can later access the variable using the same key you provided:
func MyHandler(w http.ResponseWriter, r *http.Request) {
// val is "bar".
val := context.Get(r, foo.MyKey)
// returns ("bar", true)
val, ok := context.GetOk(r, foo.MyKey)
// ...
}
And that's all about the basic usage. We discuss some other ideas below.
Any type can be stored in the context. To enforce a given type, make the key
private and wrap Get() and Set() to accept and return values of a specific
type:
type key int
const mykey key = 0
// GetMyKey returns a value for this package from the request values.
func GetMyKey(r *http.Request) SomeType {
if rv := context.Get(r, mykey); rv != nil {
return rv.(SomeType)
}
return nil
}
// SetMyKey sets a value for this package in the request values.
func SetMyKey(r *http.Request, val SomeType) {
context.Set(r, mykey, val)
}
Variables must be cleared at the end of a request, to remove all values
that were stored. This can be done in an http.Handler, after a request was
served. Just call Clear() passing the request:
context.Clear(r)
...or use ClearHandler(), which conveniently wraps an http.Handler to clear
variables at the end of a request lifetime.
The Routers from the packages gorilla/mux and gorilla/pat call Clear()
so if you are using either of them you don't need to clear the context manually.
*/
package context

View File

@@ -1,23 +0,0 @@
language: go
sudo: false
matrix:
include:
- go: 1.5.x
- go: 1.6.x
- go: 1.7.x
- go: 1.8.x
- go: 1.9.x
- go: 1.10.x
- go: tip
allow_failures:
- go: tip
install:
- # Skip
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go tool vet .
- go test -v -race ./...

8
vendor/github.com/gorilla/mux/AUTHORS generated vendored Normal file
View File

@@ -0,0 +1,8 @@
# This is the official list of gorilla/mux authors for copyright purposes.
#
# Please keep the list sorted.
Google LLC (https://opensource.google.com/)
Kamil Kisielk <kamil@kamilkisiel.net>
Matt Silverlock <matt@eatsleeprepeat.net>
Rodrigo Moraes (https://github.com/moraes)

View File

@@ -1,11 +0,0 @@
**What version of Go are you running?** (Paste the output of `go version`)
**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
**Describe your problem** (and what you have tried so far)
**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)

View File

@@ -1,4 +1,4 @@
Copyright (c) 2012 Rodrigo Moraes. All rights reserved. Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are modification, are permitted provided that the following conditions are

View File

@@ -2,11 +2,12 @@
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) [![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png) ![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
http://www.gorillatoolkit.org/pkg/mux https://www.gorillatoolkit.org/pkg/mux
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
their respective handler. their respective handler.
@@ -29,6 +30,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Walking Routes](#walking-routes) * [Walking Routes](#walking-routes)
* [Graceful Shutdown](#graceful-shutdown) * [Graceful Shutdown](#graceful-shutdown)
* [Middleware](#middleware) * [Middleware](#middleware)
* [Handling CORS Requests](#handling-cors-requests)
* [Testing Handlers](#testing-handlers) * [Testing Handlers](#testing-handlers)
* [Full Example](#full-example) * [Full Example](#full-example)
@@ -88,7 +90,7 @@ r := mux.NewRouter()
// Only matches if domain is "www.example.com". // Only matches if domain is "www.example.com".
r.Host("www.example.com") r.Host("www.example.com")
// Matches a dynamic subdomain. // Matches a dynamic subdomain.
r.Host("{subdomain:[a-z]+}.domain.com") r.Host("{subdomain:[a-z]+}.example.com")
``` ```
There are several other matchers that can be added. To match path prefixes: There are several other matchers that can be added. To match path prefixes:
@@ -238,13 +240,13 @@ This also works for host and query value variables:
```go ```go
r := mux.NewRouter() r := mux.NewRouter()
r.Host("{subdomain}.domain.com"). r.Host("{subdomain}.example.com").
Path("/articles/{category}/{id:[0-9]+}"). Path("/articles/{category}/{id:[0-9]+}").
Queries("filter", "{filter}"). Queries("filter", "{filter}").
HandlerFunc(ArticleHandler). HandlerFunc(ArticleHandler).
Name("article") Name("article")
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" // url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news", url, err := r.Get("article").URL("subdomain", "news",
"category", "technology", "category", "technology",
"id", "42", "id", "42",
@@ -264,7 +266,7 @@ r.HeadersRegexp("Content-Type", "application/(text|json)")
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do: There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
```go ```go
// "http://news.domain.com/" // "http://news.example.com/"
host, err := r.Get("article").URLHost("subdomain", "news") host, err := r.Get("article").URLHost("subdomain", "news")
// "/articles/technology/42" // "/articles/technology/42"
@@ -275,12 +277,12 @@ And if you use subrouters, host and path defined separately can be built as well
```go ```go
r := mux.NewRouter() r := mux.NewRouter()
s := r.Host("{subdomain}.domain.com").Subrouter() s := r.Host("{subdomain}.example.com").Subrouter()
s.Path("/articles/{category}/{id:[0-9]+}"). s.Path("/articles/{category}/{id:[0-9]+}").
HandlerFunc(ArticleHandler). HandlerFunc(ArticleHandler).
Name("article") Name("article")
// "http://news.domain.com/articles/technology/42" // "http://news.example.com/articles/technology/42"
url, err := r.Get("article").URL("subdomain", "news", url, err := r.Get("article").URL("subdomain", "news",
"category", "technology", "category", "technology",
"id", "42") "id", "42")
@@ -491,6 +493,73 @@ r.Use(amw.Middleware)
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
### Handling CORS Requests
[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
* If you do not specify any methods, then:
> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
```go
package main
import (
"net/http"
"github.com/gorilla/mux"
)
func main() {
r := mux.NewRouter()
// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
r.Use(mux.CORSMethodMiddleware(r))
http.ListenAndServe(":8080", r)
}
func fooHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Access-Control-Allow-Origin", "*")
if r.Method == http.MethodOptions {
return
}
w.Write([]byte("foo"))
}
```
And a request to `/foo` using something like:
```bash
curl localhost:8080/foo -v
```
Would look like:
```bash
* Trying ::1...
* TCP_NODELAY set
* Connected to localhost (::1) port 8080 (#0)
> GET /foo HTTP/1.1
> Host: localhost:8080
> User-Agent: curl/7.59.0
> Accept: */*
>
< HTTP/1.1 200 OK
< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
< Access-Control-Allow-Origin: *
< Date: Fri, 28 Jun 2019 20:13:30 GMT
< Content-Length: 3
< Content-Type: text/plain; charset=utf-8
<
* Connection #0 to host localhost left intact
foo
```
### Testing Handlers ### Testing Handlers
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
@@ -503,8 +572,8 @@ package main
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
// A very simple health check. // A very simple health check.
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json") w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// In the future we could report back on the status of our DB, or our cache // In the future we could report back on the status of our DB, or our cache
// (e.g. Redis) by performing a simple PING, and include them in the response. // (e.g. Redis) by performing a simple PING, and include them in the response.

View File

@@ -1,5 +1,3 @@
// +build go1.7
package mux package mux
import ( import (
@@ -18,7 +16,3 @@ func contextSet(r *http.Request, key, val interface{}) *http.Request {
return r.WithContext(context.WithValue(r.Context(), key, val)) return r.WithContext(context.WithValue(r.Context(), key, val))
} }
func contextClear(r *http.Request) {
return
}

View File

@@ -1,26 +0,0 @@
// +build !go1.7
package mux
import (
"net/http"
"github.com/gorilla/context"
)
func contextGet(r *http.Request, key interface{}) interface{} {
return context.Get(r, key)
}
func contextSet(r *http.Request, key, val interface{}) *http.Request {
if val == nil {
return r
}
context.Set(r, key, val)
return r
}
func contextClear(r *http.Request) {
context.Clear(r)
}

View File

@@ -295,7 +295,7 @@ A more complex authentication middleware, which maps session token to users, cou
r := mux.NewRouter() r := mux.NewRouter()
r.HandleFunc("/", handler) r.HandleFunc("/", handler)
amw := authenticationMiddleware{} amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
amw.Populate() amw.Populate()
r.Use(amw.Middleware) r.Use(amw.Middleware)

1
vendor/github.com/gorilla/mux/go.mod generated vendored Normal file
View File

@@ -0,0 +1 @@
module github.com/gorilla/mux

View File

@@ -32,37 +32,19 @@ func (r *Router) useInterface(mw middleware) {
r.middlewares = append(r.middlewares, mw) r.middlewares = append(r.middlewares, mw)
} }
// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header // CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
// on a request, by matching routes based only on paths. It also handles // on requests for routes that have an OPTIONS method matcher to all the method matchers on
// OPTIONS requests, by settings Access-Control-Allow-Methods, and then // the route. Routes that do not explicitly handle OPTIONS requests will not be processed
// returning without calling the next http handler. // by the middleware. See examples for usage.
func CORSMethodMiddleware(r *Router) MiddlewareFunc { func CORSMethodMiddleware(r *Router) MiddlewareFunc {
return func(next http.Handler) http.Handler { return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
var allMethods []string allMethods, err := getAllMethodsForRoute(r, req)
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
for _, m := range route.matchers {
if _, ok := m.(*routeRegexp); ok {
if m.Match(req, &RouteMatch{}) {
methods, err := route.GetMethods()
if err != nil {
return err
}
allMethods = append(allMethods, methods...)
}
break
}
}
return nil
})
if err == nil { if err == nil {
w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ",")) for _, v := range allMethods {
if v == http.MethodOptions {
if req.Method == "OPTIONS" { w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
return }
} }
} }
@@ -70,3 +52,28 @@ func CORSMethodMiddleware(r *Router) MiddlewareFunc {
}) })
} }
} }
// getAllMethodsForRoute returns all the methods from method matchers matching a given
// request.
//
// It walks every route registered on r. For each route, only the first
// routeRegexp matcher is consulted — the inner loop breaks after the first
// routeRegexp entry whether or not it matched. When that matcher matches req,
// the route's declared methods are appended to the result. If GetMethods
// returns an error for a matched route (presumably a route without method
// matchers — TODO confirm against Route.GetMethods), the walk is aborted and
// that error is returned alongside the methods gathered so far.
func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
var allMethods []string
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
// Only the host/path regexp matcher is considered; stop scanning this
// route's matchers at the first routeRegexp regardless of match result.
for _, m := range route.matchers {
if _, ok := m.(*routeRegexp); ok {
if m.Match(req, &RouteMatch{}) {
methods, err := route.GetMethods()
if err != nil {
return err
}
allMethods = append(allMethods, methods...)
}
break
}
}
return nil
})
return allMethods, err
}

129
vendor/github.com/gorilla/mux/mux.go generated vendored
View File

@@ -22,7 +22,7 @@ var (
// NewRouter returns a new router instance. // NewRouter returns a new router instance.
func NewRouter() *Router { func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} return &Router{namedRoutes: make(map[string]*Route)}
} }
// Router registers routes to be matched and dispatches a handler. // Router registers routes to be matched and dispatches a handler.
@@ -50,24 +50,78 @@ type Router struct {
// Configurable Handler to be used when the request method does not match the route. // Configurable Handler to be used when the request method does not match the route.
MethodNotAllowedHandler http.Handler MethodNotAllowedHandler http.Handler
// Parent route, if this is a subrouter.
parent parentRoute
// Routes to be matched, in order. // Routes to be matched, in order.
routes []*Route routes []*Route
// Routes by name for URL building. // Routes by name for URL building.
namedRoutes map[string]*Route namedRoutes map[string]*Route
// See Router.StrictSlash(). This defines the flag for new routes.
strictSlash bool
// See Router.SkipClean(). This defines the flag for new routes.
skipClean bool
// If true, do not clear the request context after handling the request. // If true, do not clear the request context after handling the request.
// This has no effect when go1.7+ is used, since the context is stored //
// Deprecated: No effect when go1.7+ is used, since the context is stored
// on the request itself. // on the request itself.
KeepContext bool KeepContext bool
// see Router.UseEncodedPath(). This defines a flag for all routes.
useEncodedPath bool
// Slice of middlewares to be called after a match is found // Slice of middlewares to be called after a match is found
middlewares []middleware middlewares []middleware
// configuration shared with `Route`
routeConf
}
// common route configuration shared between `Router` and `Route`
type routeConf struct {
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
useEncodedPath bool
// If true, when the path pattern is "/path/", accessing "/path" will
// redirect to the former and vice versa.
strictSlash bool
// If true, when the path pattern is "/path//to", accessing "/path//to"
// will not redirect
skipClean bool
// Manager for the variables from host and path.
regexp routeRegexpGroup
// List of matchers.
matchers []matcher
// The scheme used when building URLs.
buildScheme string
buildVarsFunc BuildVarsFunc
}
// copyRouteConf returns an effective deep copy of `routeConf`.
//
// The value is first shallow-copied, then the host/path regexps, the query
// regexp slice, and the matcher slice are duplicated so that mutating the
// copy's collections cannot affect the original. Note that the matcher
// interface values are copied, not cloned, so matcher state (if any) remains
// shared between original and copy.
func copyRouteConf(r routeConf) routeConf {
c := r
if r.regexp.path != nil {
c.regexp.path = copyRouteRegexp(r.regexp.path)
}
if r.regexp.host != nil {
c.regexp.host = copyRouteRegexp(r.regexp.host)
}
c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
for _, q := range r.regexp.queries {
c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
}
c.matchers = make([]matcher, 0, len(r.matchers))
for _, m := range r.matchers {
c.matchers = append(c.matchers, m)
}
return c
}
// copyRouteRegexp returns a pointer to a shallow copy of the given
// routeRegexp, giving the caller an independent pointer (the fields inside,
// including the compiled regexp, are still shared).
func copyRouteRegexp(r *routeRegexp) *routeRegexp {
c := *r
return &c
} }
// Match attempts to match the given request against the router's registered routes. // Match attempts to match the given request against the router's registered routes.
@@ -155,22 +209,18 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
handler = http.NotFoundHandler() handler = http.NotFoundHandler()
} }
if !r.KeepContext {
defer contextClear(req)
}
handler.ServeHTTP(w, req) handler.ServeHTTP(w, req)
} }
// Get returns a route registered with the given name. // Get returns a route registered with the given name.
func (r *Router) Get(name string) *Route { func (r *Router) Get(name string) *Route {
return r.getNamedRoutes()[name] return r.namedRoutes[name]
} }
// GetRoute returns a route registered with the given name. This method // GetRoute returns a route registered with the given name. This method
// was renamed to Get() and remains here for backwards compatibility. // was renamed to Get() and remains here for backwards compatibility.
func (r *Router) GetRoute(name string) *Route { func (r *Router) GetRoute(name string) *Route {
return r.getNamedRoutes()[name] return r.namedRoutes[name]
} }
// StrictSlash defines the trailing slash behavior for new routes. The initial // StrictSlash defines the trailing slash behavior for new routes. The initial
@@ -221,55 +271,24 @@ func (r *Router) UseEncodedPath() *Router {
return r return r
} }
// ----------------------------------------------------------------------------
// parentRoute
// ----------------------------------------------------------------------------
func (r *Router) getBuildScheme() string {
if r.parent != nil {
return r.parent.getBuildScheme()
}
return ""
}
// getNamedRoutes returns the map where named routes are registered.
func (r *Router) getNamedRoutes() map[string]*Route {
if r.namedRoutes == nil {
if r.parent != nil {
r.namedRoutes = r.parent.getNamedRoutes()
} else {
r.namedRoutes = make(map[string]*Route)
}
}
return r.namedRoutes
}
// getRegexpGroup returns regexp definitions from the parent route, if any.
func (r *Router) getRegexpGroup() *routeRegexpGroup {
if r.parent != nil {
return r.parent.getRegexpGroup()
}
return nil
}
func (r *Router) buildVars(m map[string]string) map[string]string {
if r.parent != nil {
m = r.parent.buildVars(m)
}
return m
}
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Route factories // Route factories
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// NewRoute registers an empty route. // NewRoute registers an empty route.
func (r *Router) NewRoute() *Route { func (r *Router) NewRoute() *Route {
route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath} // initialize a route with a copy of the parent router's configuration
route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
r.routes = append(r.routes, route) r.routes = append(r.routes, route)
return route return route
} }
// Name registers a new route with a name.
// See Route.Name().
func (r *Router) Name(name string) *Route {
return r.NewRoute().Name(name)
}
// Handle registers a new route with a matcher for the URL path. // Handle registers a new route with a matcher for the URL path.
// See Route.Path() and Route.Handler(). // See Route.Path() and Route.Handler().
func (r *Router) Handle(path string, handler http.Handler) *Route { func (r *Router) Handle(path string, handler http.Handler) *Route {

Some files were not shown because too many files have changed in this diff Show More