dashboard 备注 客户端管理优化 多客户端支持 流量显示支持 热更新支持 404错误页支持

This commit is contained in:
刘河
2019-01-25 12:10:12 +08:00
parent c533436c78
commit c34e5e1a7d
37 changed files with 5415 additions and 732 deletions

View File

@@ -107,14 +107,14 @@ func (s *SnappyConn) Write(b []byte) (n int, err error) {
//snappy压缩读 包含解密
func (s *SnappyConn) Read(b []byte) (n int, err error) {
buf := bufPool.Get().([]byte)
defer func() {
if err == nil && n == len(IO_EOF) && string(b[:n]) == IO_EOF {
err = io.EOF
n = 0
}
bufPool.Put(buf)
}()
buf := bufPool.Get().([]byte)
defer bufPool.Put(buf)
if n, err = s.r.Read(buf); err != nil {
return
}
@@ -153,8 +153,8 @@ func (s *Conn) ReadLen(cLen int) ([]byte, error) {
buf = bufPoolSmall.Get().([]byte)[:cLen]
defer bufPoolSmall.Put(buf)
} else {
buf = bufPool.Get().([]byte)[:cLen]
defer bufPool.Put(buf)
buf = bufPoolMax.Get().([]byte)[:cLen]
defer bufPoolMax.Put(buf)
}
if n, err := io.ReadFull(s, buf); err != nil || n != cLen {
return buf, errors.New("读取指定长度错误" + err.Error())

487
utils/file.go Normal file
View File

@@ -0,0 +1,487 @@
package utils
import (
"easyProxy/utils"
"encoding/csv"
"errors"
"github.com/astaxie/beego"
"log"
"os"
"strconv"
"sync"
)
var (
CsvDb *Csv
once sync.Once
)
// Flow tracks traffic counters for a client, task or host rule.
type Flow struct {
	ExportFlow int64 // outbound (export) traffic — presumably bytes, TODO confirm unit
	InletFlow  int64 // inbound (inlet) traffic — presumably bytes, TODO confirm unit
}
// Client is one registered client, persisted as a row of conf/clients.csv.
type Client struct {
	Cnf       *ServerConfig // base config (U/P/Crypt/Mux/Compress) tasks may inherit
	Id        int           // unique client id
	VerifyKey string        // verification key used to authenticate the client
	Addr      string        // client ip address (updated in GetIdByVerifyKey)
	Remark    string        // free-form remark
	Status    bool          // whether the client is enabled
	IsConnect bool          // whether the client is currently connected
	Flow      *Flow         // traffic counters (always allocated by the loaders)
}
// ServerConfig describes one forwarding task; it also doubles as a client's
// base configuration (see Client.Cnf), persisted as a row of conf/tasks.csv.
type ServerConfig struct {
	TcpPort        int    // port for server<->client communication
	VerifyKey      string // verification key
	Mode           string // start mode
	Target         string // forwarding target
	U              string // socks5 auth username
	P              string // socks5 auth password
	Compress       string // compression method
	Start          int    // whether the task is enabled
	IsRun          int    // whether the task is currently running
	ClientStatus   int    // client status (original comment had a stray typo: "客s户端状态")
	Crypt          bool   // whether to encrypt
	Mux            bool   // NOTE(review): original comment said "whether to encrypt" (copy-paste); presumably "whether to multiplex" — confirm
	CompressEncode int    // compress-encode mode (original comment said "encryption method" — confirm)
	CompressDecode int    // compress-decode mode (original comment said "decryption method" — confirm)
	Id             int    // task id
	ClientId       int    // id of the client this task belongs to
	UseClientCnf   bool   // whether to inherit the client's base config
	Flow           *Flow  // traffic counters
	Remark         string // free-form remark
}
// HostList is one domain-based forwarding rule, persisted as a row of
// conf/hosts.csv.
type HostList struct {
	ClientId     int    // id of the owning client (original comment was copy-pasted from ServerConfig.TcpPort)
	Host         string // host (domain) to match (original comment was copy-pasted from ServerConfig.Mode)
	Target       string // forwarding target
	HeaderChange string // header rewrite rule
	HostChange   string // host header rewrite rule
	Flow         *Flow  // traffic counters
	Remark       string // free-form remark
}
// NewCsv allocates an empty, not-yet-loaded Csv store; call Init to load it.
func NewCsv() *Csv {
	return &Csv{}
}
// Csv is the csv-file-backed in-memory store for tasks, hosts and clients.
// NOTE(review): the embedded Mutex is taken only by some methods (id
// generation, client mutation); loaders and Store* run unlocked — confirm
// intended concurrency model.
type Csv struct {
	Tasks            []*ServerConfig // forwarding tasks
	Path             string          // NOTE(review): never read or written in this file
	Hosts            []*HostList     // domain forwarding rules
	Clients          []*Client       // registered clients
	ClientIncreaseId int             // highest client id seen; source for GetClientId
	TaskIncreaseId   int             // highest task id seen; source for GetTaskId
	sync.Mutex
}
// Init loads tasks, hosts and clients from their csv files into memory.
// Each loader calls log.Fatal if its file cannot be opened.
func (s *Csv) Init() {
	s.LoadTaskFromCsv()
	s.LoadHostFromCsv()
	s.LoadClientFromCsv()
}
// StoreTasksToCsv rewrites conf/tasks.csv from the in-memory task list.
// Any I/O error is fatal, matching this store's overall error policy.
func (s *Csv) StoreTasksToCsv() {
	csvFile, err := os.Create(beego.AppPath + "/conf/tasks.csv")
	if err != nil {
		// log.Fatal, not log.Fatalf(err.Error()): the error text is not a
		// format string and any '%' in it would garble the output.
		log.Fatal(err)
	}
	defer csvFile.Close()
	writer := csv.NewWriter(csvFile)
	for _, task := range s.Tasks {
		record := []string{
			strconv.Itoa(task.TcpPort),
			task.Mode,
			task.Target,
			task.U,
			task.P,
			task.Compress,
			strconv.Itoa(task.Start),
			GetStrByBool(task.Crypt),
			GetStrByBool(task.Mux),
			strconv.Itoa(task.CompressEncode),
			strconv.Itoa(task.CompressDecode),
			strconv.Itoa(task.Id),
			strconv.Itoa(task.ClientId),
			strconv.FormatBool(task.UseClientCnf),
			task.Remark,
		}
		if err := writer.Write(record); err != nil {
			log.Fatal(err)
		}
	}
	writer.Flush()
	// Flush reports nothing; surface any buffered-write error it swallowed.
	if err := writer.Error(); err != nil {
		log.Fatal(err)
	}
}
// openFile reads all records of the csv file at path.
// FieldsPerRecord is -1, so rows may have a variable number of fields —
// callers must bounds-check each row before indexing it.
func (s *Csv) openFile(path string) ([][]string, error) {
	file, err := os.Open(path)
	if err != nil {
		// Return the error instead of panicking: the signature already
		// reports errors and every caller in this file checks them.
		return nil, err
	}
	defer file.Close()
	reader := csv.NewReader(file)
	reader.FieldsPerRecord = -1
	return reader.ReadAll()
}
// LoadTaskFromCsv loads conf/tasks.csv into s.Tasks and keeps
// TaskIncreaseId at the highest task id seen, so GetTaskId continues the
// sequence.
func (s *Csv) LoadTaskFromCsv() {
	path := beego.AppPath + "/conf/tasks.csv"
	records, err := s.openFile(path)
	if err != nil {
		log.Fatal("配置文件打开错误:", path)
	}
	var tasks []*ServerConfig
	for _, item := range records {
		// Skip malformed rows: FieldsPerRecord is -1 in openFile, so a
		// short row would otherwise panic with an index error.
		if len(item) < 15 {
			continue
		}
		post := &ServerConfig{
			TcpPort:        GetIntNoErrByStr(item[0]),
			Mode:           item[1],
			Target:         item[2],
			U:              item[3],
			P:              item[4],
			Compress:       item[5],
			Start:          GetIntNoErrByStr(item[6]),
			Crypt:          GetBoolByStr(item[7]),
			Mux:            GetBoolByStr(item[8]),
			CompressEncode: GetIntNoErrByStr(item[9]),
			CompressDecode: GetIntNoErrByStr(item[10]),
			Id:             GetIntNoErrByStr(item[11]),
			ClientId:       GetIntNoErrByStr(item[12]),
			UseClientCnf:   GetBoolByStr(item[13]),
			Remark:         item[14],
		}
		post.Flow = new(Flow)
		tasks = append(tasks, post)
		if post.Id > s.TaskIncreaseId {
			s.TaskIncreaseId = post.Id
		}
	}
	s.Tasks = tasks
}
// GetTaskId hands out the next auto-increment task id (thread safe).
func (s *Csv) GetTaskId() int {
	s.Lock()
	defer s.Unlock()
	next := s.TaskIncreaseId + 1
	s.TaskIncreaseId = next
	return next
}
// GetIdByVerifyKey resolves a hashed verify key to the id of an enabled
// client, recording the caller's address on the matching client record.
func (s *Csv) GetIdByVerifyKey(vKey string, addr string) (int, error) {
	s.Lock()
	defer s.Unlock()
	for _, client := range s.Clients {
		if utils.Getverifyval(client.VerifyKey) != vKey || !client.Status {
			continue
		}
		client.Addr = addr
		return client.Id, nil
	}
	return 0, errors.New("not found")
}
// NewTask appends a task (with fresh traffic counters) and persists the
// task list. Takes the store lock for consistency with NewClient: the old
// version mutated s.Tasks unlocked while other goroutines could read it.
func (s *Csv) NewTask(t *ServerConfig) {
	s.Lock()
	defer s.Unlock()
	t.Flow = new(Flow)
	s.Tasks = append(s.Tasks, t)
	s.StoreTasksToCsv()
}
// UpdateTask replaces the stored task with the same Id and persists the
// list; returns an error when no such task exists.
func (s *Csv) UpdateTask(t *ServerConfig) error {
	s.Lock()
	defer s.Unlock()
	for k, v := range s.Tasks {
		if v.Id == t.Id {
			// Replace in place: the old delete+append moved the updated
			// task to the end of the list on every edit.
			s.Tasks[k] = t
			s.StoreTasksToCsv()
			return nil
		}
	}
	return errors.New("不存在")
}
// DelTask removes the task with the given id and persists the list.
// Returns an error when no task has that id.
func (s *Csv) DelTask(id int) error {
	for i, task := range s.Tasks {
		if task.Id != id {
			continue
		}
		s.Tasks = append(s.Tasks[:i], s.Tasks[i+1:]...)
		s.StoreTasksToCsv()
		return nil
	}
	return errors.New("不存在")
}
// GetTask returns the task with the given id.
// On failure it now returns a nil config: the old named-return loop leaked
// the last element of s.Tasks alongside the error.
func (s *Csv) GetTask(id int) (*ServerConfig, error) {
	for _, task := range s.Tasks {
		if task.Id == id {
			return task, nil
		}
	}
	return nil, errors.New("未找到")
}
// StoreHostToCsv rewrites conf/hosts.csv from the in-memory host list.
// Errors are fatal via log.Fatal — this used to be the only Store* method
// that panicked, now consistent with StoreTasksToCsv/StoreClientsToCsv.
func (s *Csv) StoreHostToCsv() {
	csvFile, err := os.Create(beego.AppPath + "/conf/hosts.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer csvFile.Close()
	writer := csv.NewWriter(csvFile)
	for _, host := range s.Hosts {
		record := []string{
			host.Host,
			host.Target,
			strconv.Itoa(host.ClientId),
			host.HeaderChange,
			host.HostChange,
			host.Remark,
		}
		if err := writer.Write(record); err != nil {
			log.Fatal(err)
		}
	}
	writer.Flush()
	// Flush reports nothing; surface any buffered-write error it swallowed.
	if err := writer.Error(); err != nil {
		log.Fatal(err)
	}
}
// LoadClientFromCsv loads conf/clients.csv into s.Clients and keeps
// ClientIncreaseId at the highest client id seen.
func (s *Csv) LoadClientFromCsv() {
	path := beego.AppPath + "/conf/clients.csv"
	records, err := s.openFile(path)
	if err != nil {
		log.Fatal("配置文件打开错误:", path)
	}
	var clients []*Client
	for _, item := range records {
		// Skip malformed rows: FieldsPerRecord is -1 in openFile, so a
		// short row would otherwise panic with an index error.
		if len(item) < 10 {
			continue
		}
		post := &Client{
			Id:        GetIntNoErrByStr(item[0]),
			VerifyKey: item[1],
			Addr:      item[2],
			Remark:    item[3],
			Status:    GetBoolByStr(item[4]),
			Cnf: &ServerConfig{
				U:        item[5],
				P:        item[6],
				Crypt:    GetBoolByStr(item[7]),
				Mux:      GetBoolByStr(item[8]),
				Compress: item[9],
			},
		}
		if post.Id > s.ClientIncreaseId {
			s.ClientIncreaseId = post.Id
		}
		post.Flow = new(Flow)
		clients = append(clients, post)
	}
	s.Clients = clients
}
// LoadHostFromCsv loads conf/hosts.csv into s.Hosts.
func (s *Csv) LoadHostFromCsv() {
	path := beego.AppPath + "/conf/hosts.csv"
	records, err := s.openFile(path)
	if err != nil {
		log.Fatal("配置文件打开错误:", path)
	}
	var hosts []*HostList
	for _, item := range records {
		// Skip malformed rows: FieldsPerRecord is -1 in openFile, so a
		// short row would otherwise panic with an index error.
		if len(item) < 6 {
			continue
		}
		post := &HostList{
			ClientId:     GetIntNoErrByStr(item[2]),
			Host:         item[0],
			Target:       item[1],
			HeaderChange: item[3],
			HostChange:   item[4],
			Remark:       item[5],
		}
		post.Flow = new(Flow)
		hosts = append(hosts, post)
	}
	s.Hosts = hosts
}
// DelHost removes the rule matching the given host and persists the list.
// Returns an error when no rule matches.
func (s *Csv) DelHost(host string) error {
	for i, rule := range s.Hosts {
		if rule.Host != host {
			continue
		}
		s.Hosts = append(s.Hosts[:i], s.Hosts[i+1:]...)
		s.StoreHostToCsv()
		return nil
	}
	return errors.New("不存在")
}
// NewHost appends a host rule (with fresh traffic counters) and persists
// the list. Takes the store lock for consistency with NewClient: the old
// version mutated s.Hosts unlocked while other goroutines could read it.
func (s *Csv) NewHost(t *HostList) {
	s.Lock()
	defer s.Unlock()
	t.Flow = new(Flow)
	s.Hosts = append(s.Hosts, t)
	s.StoreHostToCsv()
}
// UpdateHost replaces the stored rule with the same Host and persists the
// list; returns an error when no such rule exists.
func (s *Csv) UpdateHost(t *HostList) error {
	s.Lock()
	defer s.Unlock()
	for k, v := range s.Hosts {
		if v.Host == t.Host {
			// Replace in place: the old delete+append moved the updated
			// rule to the end of the list on every edit.
			s.Hosts[k] = t
			s.StoreHostToCsv()
			return nil
		}
	}
	return errors.New("不存在")
}
// GetHostList returns one page of host rules (filtered to client id when
// id != 0) plus the total count of matching rules. start is the number of
// matching entries to skip; length is the page size.
// Fixes an off-by-one: the old `if length--; length > 0` returned one
// entry fewer than requested (and none at all for length == 1).
func (s *Csv) GetHostList(start, length int, id int) ([]*HostList, int) {
	list := make([]*HostList, 0)
	var cnt int
	for _, v := range s.Hosts {
		if id != 0 && v.ClientId != id {
			continue
		}
		cnt++
		if start > 0 {
			start--
			continue
		}
		if length > 0 {
			length--
			list = append(list, v)
		}
	}
	return list, cnt
}
// DelClient removes the client with the given id and persists the list.
// Takes the store lock: NewClient and UpdateClient already lock, so the old
// unlocked delete raced with them.
func (s *Csv) DelClient(id int) error {
	s.Lock()
	defer s.Unlock()
	for k, v := range s.Clients {
		if v.Id == id {
			s.Clients = append(s.Clients[:k], s.Clients[k+1:]...)
			s.StoreClientsToCsv()
			return nil
		}
	}
	return errors.New("不存在")
}
// NewClient appends a freshly registered client (with zeroed traffic
// counters) and rewrites clients.csv. Thread safe.
func (s *Csv) NewClient(client *Client) {
	s.Lock()
	defer s.Unlock()
	client.Flow = new(Flow)
	s.Clients = append(s.Clients, client)
	s.StoreClientsToCsv()
}
// GetClientId hands out the next auto-increment client id (thread safe).
func (s *Csv) GetClientId() int {
	s.Lock()
	defer s.Unlock()
	next := s.ClientIncreaseId + 1
	s.ClientIncreaseId = next
	return next
}
// UpdateClient replaces the stored client with the same Id and persists the
// list; returns an error when no such client exists. Thread safe.
func (s *Csv) UpdateClient(t *Client) error {
	s.Lock()
	defer s.Unlock()
	for k, v := range s.Clients {
		if v.Id == t.Id {
			// Replace in place: the old delete+append moved the updated
			// client to the end of the list on every edit.
			s.Clients[k] = t
			s.StoreClientsToCsv()
			return nil
		}
	}
	return errors.New("不存在")
}
// GetClientList returns one page of clients plus the total client count.
// start is the number of entries to skip; length is the page size.
// Fixes an off-by-one: the old `if length--; length > 0` returned one
// entry fewer than requested (and none at all for length == 1).
func (s *Csv) GetClientList(start, length int) ([]*Client, int) {
	list := make([]*Client, 0)
	var cnt int
	for _, v := range s.Clients {
		cnt++
		if start > 0 {
			start--
			continue
		}
		if length > 0 {
			length--
			list = append(list, v)
		}
	}
	return list, cnt
}
// GetClient returns the client with the given id.
// On failure it now returns a nil client: the old named-return loop leaked
// the last element of s.Clients alongside the error.
func (s *Csv) GetClient(id int) (*Client, error) {
	for _, client := range s.Clients {
		if client.Id == id {
			return client, nil
		}
	}
	return nil, errors.New("未找到")
}
// StoreClientsToCsv rewrites conf/clients.csv from the in-memory client
// list. Any I/O error is fatal, matching this store's overall error policy.
func (s *Csv) StoreClientsToCsv() {
	csvFile, err := os.Create(beego.AppPath + "/conf/clients.csv")
	if err != nil {
		// log.Fatal, not log.Fatalf(err.Error()): the error text is not a
		// format string and any '%' in it would garble the output.
		log.Fatal(err)
	}
	defer csvFile.Close()
	writer := csv.NewWriter(csvFile)
	for _, client := range s.Clients {
		record := []string{
			strconv.Itoa(client.Id),
			client.VerifyKey,
			client.Addr,
			client.Remark,
			strconv.FormatBool(client.Status),
			client.Cnf.U,
			client.Cnf.P,
			utils.GetStrByBool(client.Cnf.Crypt),
			utils.GetStrByBool(client.Cnf.Mux),
			client.Cnf.Compress,
		}
		if err := writer.Write(record); err != nil {
			log.Fatal(err)
		}
	}
	writer.Flush()
	// Flush reports nothing; surface any buffered-write error it swallowed.
	if err := writer.Error(); err != nil {
		log.Fatal(err)
	}
}
// initCsvDb builds the singleton store and loads it from the csv files;
// run exactly once via the package-level sync.Once.
func initCsvDb() {
	CsvDb = NewCsv()
	CsvDb.Init()
}

// GetCsvDb returns the lazily-initialized, process-wide Csv store.
func GetCsvDb() *Csv {
	once.Do(initCsvDb)
	return CsvDb
}
// DeepCopyConfig copies a ServerConfig field by field.
// NOTE(review): despite the name, Flow is copied as a pointer, so the copy
// and the original share (and mutate) the same traffic counters — confirm
// whether this aliasing is intended before "fixing" it.
func DeepCopyConfig(c *ServerConfig) *ServerConfig {
	return &ServerConfig{
		TcpPort:        c.TcpPort,
		VerifyKey:      c.VerifyKey,
		Mode:           c.Mode,
		Target:         c.Target,
		U:              c.U,
		P:              c.P,
		Compress:       c.Compress,
		Start:          c.Start,
		IsRun:          c.IsRun,
		ClientStatus:   c.ClientStatus,
		Crypt:          c.Crypt,
		Mux:            c.Mux,
		CompressEncode: c.CompressEncode,
		CompressDecode: c.CompressDecode,
		Id:             c.Id,
		ClientId:       c.ClientId,
		UseClientCnf:   c.UseClientCnf,
		Flow:           c.Flow, // shared pointer, see note above
		Remark:         c.Remark,
	}
}

View File

@@ -3,7 +3,8 @@ package utils
import "sync"
const poolSize = 64 * 1024
const poolSizeSmall = 10
const poolSizeSmall = 100
const poolSizeUdp = 1472
const poolSizeCopy = 32 * 1024
var bufPool = sync.Pool{
@@ -11,6 +12,16 @@ var bufPool = sync.Pool{
return make([]byte, poolSize)
},
}
var BufPoolUdp = sync.Pool{
New: func() interface{} {
return make([]byte, poolSizeUdp)
},
}
var bufPoolMax = sync.Pool{
New: func() interface{} {
return make([]byte, poolSize)
},
}
var bufPoolSmall = sync.Pool{
New: func() interface{} {
return make([]byte, poolSizeSmall)

View File

@@ -3,9 +3,11 @@ package utils
import (
"encoding/base64"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"regexp"
"strconv"
"strings"
@@ -18,48 +20,52 @@ const (
COMPRESS_NONE_DECODE
COMPRESS_SNAPY_ENCODE
COMPRESS_SNAPY_DECODE
VERIFY_EER = "vkey"
WORK_MAIN = "main"
WORK_CHAN = "chan"
RES_SIGN = "sign"
RES_MSG = "msg0"
CONN_SUCCESS = "sucs"
CONN_ERROR = "fail"
TEST_FLAG = "tst"
CONN_TCP = "tcp"
CONN_UDP = "udp"
Unauthorized_BYTES = `HTTP/1.1 401 Unauthorized
VERIFY_EER = "vkey"
WORK_MAIN = "main"
WORK_CHAN = "chan"
RES_SIGN = "sign"
RES_MSG = "msg0"
CONN_SUCCESS = "sucs"
CONN_ERROR = "fail"
TEST_FLAG = "tst"
CONN_TCP = "tcp"
CONN_UDP = "udp"
UnauthorizedBytes = `HTTP/1.1 401 Unauthorized
Content-Type: text/plain; charset=utf-8
WWW-Authenticate: Basic realm="easyProxy"
401 Unauthorized`
IO_EOF = "PROXYEOF"
IO_EOF = "PROXYEOF"
ConnectionFailBytes = `HTTP/1.1 404 Not Found
`
)
//copy
func Relay(in, out net.Conn, compressType int, crypt, mux bool) {
func Relay(in, out net.Conn, compressType int, crypt, mux bool) (n int64, err error) {
switch compressType {
case COMPRESS_SNAPY_ENCODE:
copyBuffer(NewSnappyConn(in, crypt), out)
n, err = copyBuffer(NewSnappyConn(in, crypt), out)
out.Close()
NewSnappyConn(in, crypt).Write([]byte(IO_EOF))
case COMPRESS_SNAPY_DECODE:
copyBuffer(in, NewSnappyConn(out, crypt))
n, err = copyBuffer(in, NewSnappyConn(out, crypt))
in.Close()
if !mux {
out.Close()
}
case COMPRESS_NONE_ENCODE:
copyBuffer(NewCryptConn(in, crypt), out)
n, err = copyBuffer(NewCryptConn(in, crypt), out)
out.Close()
NewCryptConn(in, crypt).Write([]byte(IO_EOF))
case COMPRESS_NONE_DECODE:
copyBuffer(in, NewCryptConn(out, crypt))
n, err = copyBuffer(in, NewCryptConn(out, crypt))
in.Close()
if !mux {
out.Close()
}
}
return
}
//判断压缩方式
@@ -145,7 +151,6 @@ func GetIntNoErrByStr(str string) int {
return i
}
// io.copy的优化版读取buffer长度原为32*1024与snappy不同导致读取出的内容存在差异不利于解密
//内存优化 用到pool快速回收
func copyBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
@@ -167,7 +172,7 @@ func copyBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
err = io.ErrShortWrite
break
}
}else {
} else {
bufPoolCopy.Put(buf)
}
if er != nil {
@@ -199,15 +204,17 @@ func Getverifyval(vkey string) string {
}
//wait replay group
func ReplayWaitGroup(conn1 net.Conn, conn2 net.Conn, compressEncode, compressDecode int, crypt, mux bool) {
//conn1 网桥 conn2
func ReplayWaitGroup(conn1 net.Conn, conn2 net.Conn, compressEncode, compressDecode int, crypt, mux bool) (out int64, in int64) {
var wg sync.WaitGroup
wg.Add(1)
go func() {
Relay(conn1, conn2, compressEncode, crypt, mux)
in, _ = Relay(conn1, conn2, compressEncode, crypt, mux)
wg.Done()
}()
Relay(conn2, conn1, compressDecode, crypt, mux)
out, _ = Relay(conn2, conn1, compressDecode, crypt, mux)
wg.Wait()
return
}
func ChangeHostAndHeader(r *http.Request, host string, header string, addr string) {
@@ -227,3 +234,12 @@ func ChangeHostAndHeader(r *http.Request, host string, header string, addr strin
r.Header.Set("X-Forwarded-For", addr)
r.Header.Set("X-Real-IP", addr)
}
// ReadAllFromFile reads the entire file at filePth into memory.
// Uses ioutil.ReadFile, which closes the file: the original os.Open +
// ioutil.ReadAll leaked the file descriptor (no Close on any path).
func ReadAllFromFile(filePth string) ([]byte, error) {
	return ioutil.ReadFile(filePth)
}