MUX optimization

This commit is contained in:
刘河
2019-03-15 14:03:49 +08:00
parent f78e81b452
commit 97330bfbdc
33 changed files with 749 additions and 328 deletions

View File

@@ -83,7 +83,7 @@ func GetStrByBool(b bool) string {
//int
// GetIntNoErrByStr converts str to an int, ignoring any parse error
// (0 is returned on failure). Surrounding whitespace is trimmed before
// parsing so values read from config files convert cleanly.
func GetIntNoErrByStr(str string) int {
	i, _ := strconv.Atoi(strings.TrimSpace(str))
	return i
}
@@ -241,7 +241,8 @@ func GetIpByAddr(addr string) string {
}
func CopyBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
buf := pool.BufPoolCopy.Get().([]byte)
buf := pool.GetBufPoolCopy()
defer pool.PutBufPoolCopy(buf)
for {
nr, er := src.Read(buf)
if nr > 0 {
@@ -265,7 +266,6 @@ func CopyBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
break
}
}
defer pool.PutBufPoolCopy(buf)
return written, err
}
@@ -303,3 +303,35 @@ func GetEnvMap() map[string]string {
}
return m
}
// TrimArr returns a new slice containing only the non-empty strings of
// arr, preserving their order. The input slice is not modified.
func TrimArr(arr []string) []string {
	// Pre-size to len(arr): the result can never be larger, so this
	// avoids repeated growth copies in append.
	newArr := make([]string, 0, len(arr))
	for _, v := range arr {
		if v != "" {
			newArr = append(newArr, v)
		}
	}
	return newArr
}
// IsArrContains reports whether val occurs in arr.
// A nil slice contains nothing.
func IsArrContains(arr []string, val string) bool {
	if arr == nil {
		return false
	}
	for i := range arr {
		if arr[i] == val {
			return true
		}
	}
	return false
}
// RemoveArrVal removes the first occurrence of val from arr and returns
// the shortened slice; arr is returned unchanged when val is absent.
// Note that the backing array of arr is modified in place by the removal.
func RemoveArrVal(arr []string, val string) []string {
	for i := range arr {
		if arr[i] == val {
			return append(arr[:i], arr[i+1:]...)
		}
	}
	return arr
}

View File

@@ -29,6 +29,7 @@ type Config struct {
CommonConfig *CommonConfig
Hosts []*file.Host
Tasks []*file.Tunnel
Healths []*file.Health
LocalServer []*LocalServer
}
@@ -56,18 +57,24 @@ func NewConfig(path string) (c *Config, err error) {
}
nowContent = c.content[nowIndex:nextIndex]
if strings.Index(getTitleContent(c.title[i]), "secret") == 0 {
if strings.Index(getTitleContent(c.title[i]), "secret") == 0 && !strings.Contains(nowContent, "mode") {
local := delLocalService(nowContent)
local.Type = "secret"
c.LocalServer = append(c.LocalServer, local)
continue
}
if strings.Index(getTitleContent(c.title[i]), "p2p") == 0 {
//except mode
if strings.Index(getTitleContent(c.title[i]), "p2p") == 0 && !strings.Contains(nowContent, "mode") {
local := delLocalService(nowContent)
local.Type = "p2p"
c.LocalServer = append(c.LocalServer, local)
continue
}
//health set
if strings.Index(getTitleContent(c.title[i]), "health") == 0 {
c.Healths = append(c.Healths, dealHealth(nowContent))
continue
}
switch c.title[i] {
case "[common]":
c.CommonConfig = dealCommon(nowContent)
@@ -146,15 +153,37 @@ func dealHost(s string) *file.Host {
} else if len(item) == 1 {
item = append(item, "")
}
switch item[0] {
switch strings.TrimSpace(item[0]) {
case "host":
h.Host = item[1]
case "target":
h.Target = strings.Replace(item[1], ",", "\n", -1)
case "host_change":
h.HostChange = item[1]
case "schemego":
h.Scheme = item[1]
case "location":
h.Location = item[1]
default:
if strings.Contains(item[0], "header") {
headerChange += strings.Replace(item[0], "header_", "", -1) + ":" + item[1] + "\n"
}
h.HeaderChange = headerChange
}
}
return h
}
func dealHealth(s string) *file.Health {
h := &file.Health{}
for _, v := range strings.Split(s, "\n") {
item := strings.Split(v, "=")
if len(item) == 0 {
continue
} else if len(item) == 1 {
item = append(item, "")
}
switch strings.TrimSpace(item[0]) {
case "health_check_timeout":
h.HealthCheckTimeout = common.GetIntNoErrByStr(item[1])
case "health_check_max_failed":
@@ -163,11 +192,10 @@ func dealHost(s string) *file.Host {
h.HealthCheckInterval = common.GetIntNoErrByStr(item[1])
case "health_http_url":
h.HttpHealthUrl = item[1]
default:
if strings.Contains(item[0], "header") {
headerChange += strings.Replace(item[0], "header_", "", -1) + ":" + item[1] + "\n"
}
h.HeaderChange = headerChange
case "health_check_type":
h.HealthCheckType = item[1]
case "health_check_target":
h.HealthCheckTarget = item[1]
}
}
return h
@@ -182,7 +210,7 @@ func dealTunnel(s string) *file.Tunnel {
} else if len(item) == 1 {
item = append(item, "")
}
switch item[0] {
switch strings.TrimSpace(item[0]) {
case "port":
t.Ports = item[1]
case "mode":
@@ -197,12 +225,6 @@ func dealTunnel(s string) *file.Tunnel {
t.LocalPath = item[1]
case "strip_pre":
t.StripPre = item[1]
case "health_check_timeout":
t.HealthCheckTimeout = common.GetIntNoErrByStr(item[1])
case "health_check_max_failed":
t.HealthMaxFail = common.GetIntNoErrByStr(item[1])
case "health_check_interval":
t.HealthCheckInterval = common.GetIntNoErrByStr(item[1])
}
}
return t

View File

@@ -150,8 +150,6 @@ func (s *Conn) SetReadDeadline(t time.Duration, tp string) {
// SendLinkInfo serializes the link's fields (conn type, host, compress
// and crypt flags as strings, remote addr) via common.BinaryWrite and
// writes the resulting frame to the connection.
func (s *Conn) SendLinkInfo(link *Link) (int, error) {
	raw := bytes.NewBuffer([]byte{})
	common.BinaryWrite(raw, link.ConnType, link.Host, common.GetStrByBool(link.Compress), common.GetStrByBool(link.Crypt), link.RemoteAddr)
	// Hold the conn lock so concurrent senders cannot interleave frames.
	s.Lock()
	defer s.Unlock()
	return s.Write(raw.Bytes())
}
@@ -176,6 +174,33 @@ func (s *Conn) GetLinkInfo() (lk *Link, err error) {
return
}
//send info for link
// SendHealthInfo serializes a health report (the target info string and
// its status string) via common.BinaryWrite and writes the frame to the
// connection while holding the conn lock.
func (s *Conn) SendHealthInfo(info, status string) (int, error) {
	var frame bytes.Buffer
	common.BinaryWrite(&frame, info, status)
	s.Lock()
	defer s.Unlock()
	return s.Write(frame.Bytes())
}
//get health info from conn
func (s *Conn) GetHealthInfo() (info string, status bool, err error) {
var l int
buf := pool.BufPoolMax.Get().([]byte)
defer pool.PutBufPoolMax(buf)
if l, err = s.GetLen(); err != nil {
return
} else if _, err = s.ReadLen(l, buf); err != nil {
return
} else {
arr := strings.Split(string(buf[:l]), common.CONN_DATA_SEQ)
if len(arr) >= 2 {
return arr[0], common.GetBoolByStr(arr[1]), nil
}
}
return "", false, errors.New("receive health info error")
}
//send host info
func (s *Conn) SendHostInfo(h *file.Host) (int, error) {
/*
@@ -188,7 +213,7 @@ func (s *Conn) SendHostInfo(h *file.Host) (int, error) {
*/
raw := bytes.NewBuffer([]byte{})
binary.Write(raw, binary.LittleEndian, []byte(common.NEW_HOST))
common.BinaryWrite(raw, h.Host, h.Target, h.HeaderChange, h.HostChange, h.Remark, h.Location)
common.BinaryWrite(raw, h.Host, h.Target, h.HeaderChange, h.HostChange, h.Remark, h.Location, h.Scheme)
s.Lock()
defer s.Unlock()
return s.Write(raw.Bytes())
@@ -228,6 +253,10 @@ func (s *Conn) GetHostInfo() (h *file.Host, err error) {
h.HostChange = arr[3]
h.Remark = arr[4]
h.Location = arr[5]
h.Scheme = arr[6]
if h.Scheme == "" {
h.Scheme = "all"
}
h.Flow = new(file.Flow)
h.NoStore = true
}

View File

@@ -32,7 +32,7 @@ type Csv struct {
ClientIncreaseId int //客户端id
TaskIncreaseId int //任务自增ID
HostIncreaseId int
sync.Mutex
sync.RWMutex
}
func (s *Csv) StoreTasksToCsv() {
@@ -43,6 +43,7 @@ func (s *Csv) StoreTasksToCsv() {
}
defer csvFile.Close()
writer := csv.NewWriter(csvFile)
s.Lock()
for _, task := range s.Tasks {
if task.NoStore {
continue
@@ -64,6 +65,7 @@ func (s *Csv) StoreTasksToCsv() {
logs.Error(err.Error())
}
}
s.Unlock()
writer.Flush()
}
@@ -147,6 +149,7 @@ func (s *Csv) GetIdByVerifyKey(vKey string, addr string) (int, error) {
}
func (s *Csv) NewTask(t *Tunnel) error {
s.Lock()
for _, v := range s.Tasks {
if (v.Mode == "secret" || v.Mode == "p2p") && v.Password == t.Password {
return errors.New(fmt.Sprintf("Secret mode keys %s must be unique", t.Password))
@@ -154,33 +157,42 @@ func (s *Csv) NewTask(t *Tunnel) error {
}
t.Flow = new(Flow)
s.Tasks = append(s.Tasks, t)
s.Unlock()
s.StoreTasksToCsv()
return nil
}
// UpdateTask rewrites the tasks csv file when a task with t.Id exists,
// and returns an error otherwise.
// NOTE(review): no fields are copied from t onto the stored task here —
// presumably callers mutate the shared *Tunnel before calling; confirm.
func (s *Csv) UpdateTask(t *Tunnel) error {
	s.Lock()
	for _, v := range s.Tasks {
		if v.Id == t.Id {
			// Release the lock first: StoreTasksToCsv takes it again.
			s.Unlock()
			s.StoreTasksToCsv()
			return nil
		}
	}
	s.Unlock()
	return errors.New("the task is not exist")
}
// DelTask removes the task with the given id from s.Tasks and rewrites
// the tasks csv file. Returns an error when no task has that id.
func (s *Csv) DelTask(id int) error {
	s.Lock()
	for k, v := range s.Tasks {
		if v.Id == id {
			// Delete in place; the backing array of s.Tasks is reused.
			s.Tasks = append(s.Tasks[:k], s.Tasks[k+1:]...)
			// Release the lock first: StoreTasksToCsv takes it again.
			s.Unlock()
			s.StoreTasksToCsv()
			return nil
		}
	}
	s.Unlock()
	return errors.New("不存在")
}
//md5 password
func (s *Csv) GetTaskByMd5Password(p string) *Tunnel {
s.Lock()
defer s.Unlock()
for _, v := range s.Tasks {
if crypt.Md5(v.Password) == p {
return v
@@ -190,6 +202,8 @@ func (s *Csv) GetTaskByMd5Password(p string) *Tunnel {
}
func (s *Csv) GetTask(id int) (v *Tunnel, err error) {
s.Lock()
defer s.Unlock()
for _, v = range s.Tasks {
if v.Id == id {
return
@@ -210,6 +224,8 @@ func (s *Csv) StoreHostToCsv() {
writer := csv.NewWriter(csvFile)
// 将map中的Post转换成slice因为csv的Write需要slice参数
// 并写入csv文件
s.Lock()
defer s.Unlock()
for _, host := range s.Hosts {
if host.NoStore {
continue
@@ -313,17 +329,22 @@ func (s *Csv) LoadHostFromCsv() {
}
// DelHost removes the host with the given id from s.Hosts and rewrites
// the hosts csv file. Returns an error when no host has that id.
func (s *Csv) DelHost(id int) error {
	s.Lock()
	for k, v := range s.Hosts {
		if v.Id == id {
			// Delete in place; the backing array of s.Hosts is reused.
			s.Hosts = append(s.Hosts[:k], s.Hosts[k+1:]...)
			// Release the lock first: StoreHostToCsv takes it again.
			s.Unlock()
			s.StoreHostToCsv()
			return nil
		}
	}
	s.Unlock()
	return errors.New("不存在")
}
func (s *Csv) IsHostExist(h *Host) bool {
s.Lock()
defer s.Unlock()
for _, v := range s.Hosts {
if v.Host == h.Host && h.Location == v.Location && (v.Scheme == "all" || v.Scheme == h.Scheme) {
return true
@@ -340,24 +361,31 @@ func (s *Csv) NewHost(t *Host) error {
t.Location = "/"
}
t.Flow = new(Flow)
s.Lock()
s.Hosts = append(s.Hosts, t)
s.Unlock()
s.StoreHostToCsv()
return nil
}
// UpdateHost rewrites the hosts csv file when a host with the same Host
// string exists, and returns an error otherwise.
// NOTE(review): matches by Host string rather than Id, and no fields are
// copied from t here — confirm callers mutate the shared *Host first.
func (s *Csv) UpdateHost(t *Host) error {
	s.Lock()
	for _, v := range s.Hosts {
		if v.Host == t.Host {
			// Release the lock first: StoreHostToCsv takes it again.
			s.Unlock()
			s.StoreHostToCsv()
			return nil
		}
	}
	s.Unlock()
	return errors.New("不存在")
}
func (s *Csv) GetHost(start, length int, id int) ([]*Host, int) {
list := make([]*Host, 0)
var cnt int
s.Lock()
defer s.Unlock()
for _, v := range s.Hosts {
if id == 0 || v.Client.Id == id {
cnt++
@@ -372,13 +400,16 @@ func (s *Csv) GetHost(start, length int, id int) ([]*Host, int) {
}
// DelClient removes the client with the given id from s.Clients and
// rewrites the clients csv file. Returns an error when no client has
// that id.
func (s *Csv) DelClient(id int) error {
	s.Lock()
	for k, v := range s.Clients {
		if v.Id == id {
			// Delete in place; the backing array of s.Clients is reused.
			s.Clients = append(s.Clients[:k], s.Clients[k+1:]...)
			// Release the lock first: StoreClientsToCsv takes it again.
			s.Unlock()
			s.StoreClientsToCsv()
			return nil
		}
	}
	s.Unlock()
	return errors.New("不存在")
}
@@ -402,13 +433,15 @@ reset:
c.Flow = new(Flow)
}
s.Lock()
defer s.Unlock()
s.Clients = append(s.Clients, c)
s.Unlock()
s.StoreClientsToCsv()
return nil
}
func (s *Csv) VerifyVkey(vkey string, id int) bool {
s.Lock()
defer s.Unlock()
for _, v := range s.Clients {
if v.VerifyKey == vkey && v.Id != id {
return false
@@ -426,7 +459,6 @@ func (s *Csv) GetClientId() int {
func (s *Csv) UpdateClient(t *Client) error {
s.Lock()
defer s.Unlock()
for _, v := range s.Clients {
if v.Id == t.Id {
v.Cnf = t.Cnf
@@ -435,16 +467,20 @@ func (s *Csv) UpdateClient(t *Client) error {
v.RateLimit = t.RateLimit
v.Flow = t.Flow
v.Rate = t.Rate
s.Unlock()
s.StoreClientsToCsv()
return nil
}
}
s.Unlock()
return errors.New("该客户端不存在")
}
func (s *Csv) GetClientList(start, length int) ([]*Client, int) {
list := make([]*Client, 0)
var cnt int
s.Lock()
defer s.Unlock()
for _, v := range s.Clients {
if v.NoDisplay {
continue
@@ -460,6 +496,8 @@ func (s *Csv) GetClientList(start, length int) ([]*Client, int) {
}
func (s *Csv) GetClient(id int) (v *Client, err error) {
s.Lock()
defer s.Unlock()
for _, v = range s.Clients {
if v.Id == id {
return
@@ -469,6 +507,8 @@ func (s *Csv) GetClient(id int) (v *Client, err error) {
return
}
func (s *Csv) GetClientIdByVkey(vkey string) (id int, err error) {
s.Lock()
defer s.Unlock()
for _, v := range s.Clients {
if crypt.Md5(v.VerifyKey) == vkey {
id = v.Id
@@ -480,6 +520,8 @@ func (s *Csv) GetClientIdByVkey(vkey string) (id int, err error) {
}
func (s *Csv) GetHostById(id int) (h *Host, err error) {
s.Lock()
defer s.Unlock()
for _, v := range s.Hosts {
if v.Id == id {
h = v
@@ -495,7 +537,12 @@ func (s *Csv) GetInfoByHost(host string, r *http.Request) (h *Host, err error) {
var hosts []*Host
//Handling Ported Access
host = common.GetIpByAddr(host)
s.Lock()
defer s.Unlock()
for _, v := range s.Hosts {
if v.IsClose {
continue
}
//Remove http(s) http(s)://a.proxy.com
//*.proxy.com *.a.proxy.com Do some pan-parsing
tmp := strings.Replace(v.Host, "*", `\w+?`, -1)
@@ -533,6 +580,8 @@ func (s *Csv) StoreClientsToCsv() {
}
defer csvFile.Close()
writer := csv.NewWriter(csvFile)
s.Lock()
defer s.Unlock()
for _, client := range s.Clients {
if client.NoStore {
continue

View File

@@ -2,6 +2,7 @@ package file
import (
"github.com/cnlh/nps/lib/rate"
"github.com/pkg/errors"
"strings"
"sync"
"time"
@@ -78,7 +79,14 @@ func (s *Client) GetConn() bool {
return false
}
//modify the hosts and the tunnels by health information
func (s *Client) ModifyTarget() {
}
func (s *Client) HasTunnel(t *Tunnel) bool {
GetCsvDb().Lock()
defer GetCsvDb().Unlock()
for _, v := range GetCsvDb().Tasks {
if v.Client.Id == s.Id && v.Port == t.Port {
return true
@@ -88,6 +96,8 @@ func (s *Client) HasTunnel(t *Tunnel) bool {
}
func (s *Client) HasHost(h *Host) bool {
GetCsvDb().Lock()
defer GetCsvDb().Unlock()
for _, v := range GetCsvDb().Hosts {
if v.Client.Id == s.Id && v.Host == h.Host && h.Location == v.Location {
return true
@@ -126,14 +136,19 @@ type Health struct {
HealthMap map[string]int
HttpHealthUrl string
HealthRemoveArr []string
HealthCheckType string
HealthCheckTarget string
}
func (s *Tunnel) GetRandomTarget() string {
func (s *Tunnel) GetRandomTarget() (string, error) {
if s.TargetArr == nil {
s.TargetArr = strings.Split(s.Target, "\n")
}
if len(s.TargetArr) == 1 {
return s.TargetArr[0]
return s.TargetArr[0], nil
}
if len(s.TargetArr) == 0 {
return "", errors.New("all inward-bending targets are offline")
}
s.Lock()
defer s.Unlock()
@@ -141,7 +156,7 @@ func (s *Tunnel) GetRandomTarget() string {
s.NowIndex = -1
}
s.NowIndex++
return s.TargetArr[s.NowIndex]
return s.TargetArr[s.NowIndex], nil
}
type Config struct {
@@ -165,23 +180,26 @@ type Host struct {
TargetArr []string
NoStore bool
Scheme string //http https all
IsClose bool
Health
sync.RWMutex
}
func (s *Host) GetRandomTarget() string {
func (s *Host) GetRandomTarget() (string, error) {
if s.TargetArr == nil {
s.TargetArr = strings.Split(s.Target, "\n")
}
if len(s.TargetArr) == 1 {
return s.TargetArr[0]
return s.TargetArr[0], nil
}
if len(s.TargetArr) == 0 {
return "", errors.New("all inward-bending targets are offline")
}
s.Lock()
defer s.Unlock()
if s.NowIndex >= len(s.TargetArr)-1 {
s.NowIndex = -1
} else {
s.NowIndex++
}
return s.TargetArr[s.NowIndex]
s.NowIndex++
return s.TargetArr[s.NowIndex], nil
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/cnlh/nps/lib/pool"
"io"
"net"
"sync"
"time"
)
@@ -15,78 +16,76 @@ type conn struct {
connStatusFailCh chan struct{}
readTimeOut time.Time
writeTimeOut time.Time
sendMsgCh chan *msg //mux
sendStatusCh chan int32 //mux
readBuffer []byte
startRead int //now read position
endRead int //now end read
readFlag bool
readCh chan struct{}
waitQueue *sliceEntry
stopWrite bool
connId int32
isClose bool
readWait bool
mux *Mux
}
type msg struct {
connId int32
content []byte
}
var connPool = sync.Pool{}
func NewMsg(connId int32, content []byte) *msg {
return &msg{
connId: connId,
content: content,
}
}
func NewConn(connId int32, mux *Mux, sendMsgCh chan *msg, sendStatusCh chan int32) *conn {
return &conn{
func NewConn(connId int32, mux *Mux) *conn {
c := &conn{
readCh: make(chan struct{}),
readBuffer: pool.BufPoolCopy.Get().([]byte),
getStatusCh: make(chan struct{}),
connStatusOkCh: make(chan struct{}),
connStatusFailCh: make(chan struct{}),
readTimeOut: time.Time{},
writeTimeOut: time.Time{},
sendMsgCh: sendMsgCh,
sendStatusCh: sendStatusCh,
waitQueue: NewQueue(),
connId: connId,
isClose: false,
mux: mux,
}
return c
}
func (s *conn) Read(buf []byte) (n int, err error) {
if s.isClose {
if s.isClose || buf == nil {
return 0, errors.New("the conn has closed")
}
if s.endRead-s.startRead == 0 {
s.readWait = true
if t := s.readTimeOut.Sub(time.Now()); t > 0 {
timer := time.NewTimer(t)
select {
case <-timer.C:
s.readWait = false
return 0, errors.New("read timeout")
case <-s.readCh:
if s.endRead-s.startRead == 0 { //read finish or start
if s.waitQueue.Size() == 0 {
s.readWait = true
if t := s.readTimeOut.Sub(time.Now()); t > 0 {
timer := time.NewTimer(t)
defer timer.Stop()
select {
case <-timer.C:
s.readWait = false
return 0, errors.New("read timeout")
case <-s.readCh:
}
} else {
<-s.readCh
}
} else {
<-s.readCh
}
}
s.readWait = false
if s.isClose {
return 0, io.EOF
if s.isClose { //If the connection is closed instead of continuing command
return 0, errors.New("the conn has closed")
}
if node, err := s.waitQueue.Pop(); err != nil {
s.Close()
return 0, io.EOF
} else {
pool.PutBufPoolCopy(s.readBuffer)
s.readBuffer = node.val
s.endRead = node.l
s.startRead = 0
}
}
if len(buf) < s.endRead-s.startRead {
n = copy(buf, s.readBuffer[s.startRead:s.startRead+len(buf)])
s.startRead += n
} else {
n = copy(buf, s.readBuffer[s.startRead:s.endRead])
s.startRead = 0
s.endRead = 0
s.sendStatusCh <- s.connId
s.startRead += n
if s.waitQueue.Size() < s.mux.waitQueueSize/2 {
s.mux.sendInfo(MUX_MSG_SEND_OK, s.connId, nil)
}
}
return
}
@@ -99,6 +98,7 @@ func (s *conn) Write(buf []byte) (int, error) {
go s.write(buf, ch)
if t := s.writeTimeOut.Sub(time.Now()); t > 0 {
timer := time.NewTimer(t)
defer timer.Stop()
select {
case <-timer.C:
return 0, errors.New("write timeout")
@@ -112,18 +112,18 @@ func (s *conn) Write(buf []byte) (int, error) {
}
return len(buf), nil
}
func (s *conn) write(buf []byte, ch chan struct{}) {
start := 0
l := len(buf)
for {
if s.stopWrite {
<-s.getStatusCh
}
if l-start > pool.PoolSizeCopy {
s.sendMsgCh <- NewMsg(s.connId, buf[start:start+pool.PoolSizeCopy])
s.mux.sendInfo(MUX_NEW_MSG, s.connId, buf[start:start+pool.PoolSizeCopy])
start += pool.PoolSizeCopy
<-s.getStatusCh
} else {
s.sendMsgCh <- NewMsg(s.connId, buf[start:l])
<-s.getStatusCh
s.mux.sendInfo(MUX_NEW_MSG, s.connId, buf[start:l])
break
}
}
@@ -131,18 +131,30 @@ func (s *conn) write(buf []byte, ch chan struct{}) {
}
func (s *conn) Close() error {
if s.isClose {
return errors.New("the conn has closed")
}
times := 0
retry:
if s.waitQueue.Size() > 0 && times < 600 {
time.Sleep(time.Millisecond * 100)
times++
goto retry
}
if s.isClose {
return errors.New("the conn has closed")
}
s.isClose = true
pool.PutBufPoolCopy(s.readBuffer)
close(s.getStatusCh)
close(s.connStatusOkCh)
close(s.connStatusFailCh)
close(s.readCh)
if !s.mux.IsClose {
s.sendMsgCh <- NewMsg(s.connId, nil)
if s.readWait {
s.readCh <- struct{}{}
}
s.waitQueue.Clear()
s.mux.connMap.Delete(s.connId)
if !s.mux.IsClose {
s.mux.sendInfo(MUX_CONN_CLOSE, s.connId, nil)
}
connPool.Put(s)
return nil
}

View File

@@ -44,6 +44,12 @@ func (s *connMap) Close() {
s.closeCh <- struct{}{}
}
// Delete removes the connection with the given id from the map, holding
// the lock so concurrent map access stays safe.
func (s *connMap) Delete(id int32) {
	s.Lock()
	defer s.Unlock()
	delete(s.connMap, id)
}
func (s *connMap) clean() {
ticker := time.NewTimer(time.Minute * 1)
for {

View File

@@ -22,38 +22,35 @@ const (
MUX_PING
MUX_CONN_CLOSE
MUX_PING_RETURN
MUX_STOP_WRITE
RETRY_TIME = 2 //Heart beat allowed fault tolerance times
)
type Mux struct {
net.Listener
conn net.Conn
connMap *connMap
sendMsgCh chan *msg //write msg chan
sendStatusCh chan int32 //write read ok chan
newConnCh chan *conn
id int32
closeChan chan struct{}
IsClose bool
pingOk int
conn net.Conn
connMap *connMap
newConnCh chan *conn
id int32
closeChan chan struct{}
IsClose bool
pingOk int
waitQueueSize int
sync.Mutex
}
func NewMux(c net.Conn) *Mux {
m := &Mux{
conn: c,
connMap: NewConnMap(),
sendMsgCh: make(chan *msg),
sendStatusCh: make(chan int32),
id: 0,
closeChan: make(chan struct{}),
newConnCh: make(chan *conn),
IsClose: false,
conn: c,
connMap: NewConnMap(),
id: 0,
closeChan: make(chan struct{}),
newConnCh: make(chan *conn),
IsClose: false,
waitQueueSize: 10, //TODO :In order to be more efficient, this value can be dynamically generated according to the delay algorithm.
}
//read session by flag
go m.readSession()
//write session
go m.writeSession()
//ping
go m.ping()
return m
@@ -63,7 +60,7 @@ func (s *Mux) NewConn() (*conn, error) {
if s.IsClose {
return nil, errors.New("the mux has closed")
}
conn := NewConn(s.getId(), s, s.sendMsgCh, s.sendStatusCh)
conn := NewConn(s.getId(), s)
raw := bytes.NewBuffer([]byte{})
if err := binary.Write(raw, binary.LittleEndian, MUX_NEW_CONN); err != nil {
return nil, err
@@ -76,10 +73,14 @@ func (s *Mux) NewConn() (*conn, error) {
if _, err := s.conn.Write(raw.Bytes()); err != nil {
return nil, err
}
//set a timer timeout 30 second
timer := time.NewTimer(time.Second * 30)
defer timer.Stop()
select {
case <-conn.connStatusOkCh:
return conn, nil
case <-conn.connStatusFailCh:
case <-timer.C:
}
return nil, errors.New("create connection failthe server refused the connection")
}
@@ -95,10 +96,24 @@ func (s *Mux) Addr() net.Addr {
return s.conn.LocalAddr()
}
// sendInfo serializes a mux control frame — flag, connection id and,
// when present, a length-prefixed payload — and writes it to the
// underlying connection. The mux is closed when the write fails or when
// more than RETRY_TIME pings have gone unanswered.
// NOTE(review): in the ping-timeout case the returned error may be nil
// even though the mux was just closed — confirm callers handle that.
func (s *Mux) sendInfo(flag int32, id int32, content []byte) error {
	raw := bytes.NewBuffer([]byte{})
	binary.Write(raw, binary.LittleEndian, flag)
	binary.Write(raw, binary.LittleEndian, id)
	// len(nil) == 0, so the explicit nil check is unnecessary.
	if len(content) > 0 {
		binary.Write(raw, binary.LittleEndian, int32(len(content)))
		binary.Write(raw, binary.LittleEndian, content)
	}
	if _, err := s.conn.Write(raw.Bytes()); err != nil || s.pingOk > RETRY_TIME {
		s.Close()
		return err
	}
	return nil
}
func (s *Mux) ping() {
go func() {
ticker := time.NewTicker(time.Second * 5)
raw := bytes.NewBuffer([]byte{})
for {
select {
case <-ticker.C:
@@ -107,11 +122,7 @@ func (s *Mux) ping() {
if (math.MaxInt32 - s.id) < 10000 {
s.id = 0
}
raw.Reset()
binary.Write(raw, binary.LittleEndian, MUX_PING_FLAG)
binary.Write(raw, binary.LittleEndian, MUX_PING)
if _, err := s.conn.Write(raw.Bytes()); err != nil || s.pingOk > RETRY_TIME {
s.Close()
if err := s.sendInfo(MUX_PING_FLAG, MUX_PING, nil); err != nil || s.pingOk > RETRY_TIME {
break
}
s.pingOk += 1
@@ -122,45 +133,9 @@ func (s *Mux) ping() {
}
}
func (s *Mux) writeSession() {
go func() {
raw := bytes.NewBuffer([]byte{})
for {
raw.Reset()
select {
case msg := <-s.sendMsgCh:
if msg == nil {
break
}
if msg.content == nil { //close
binary.Write(raw, binary.LittleEndian, MUX_CONN_CLOSE)
binary.Write(raw, binary.LittleEndian, msg.connId)
break
}
binary.Write(raw, binary.LittleEndian, MUX_NEW_MSG)
binary.Write(raw, binary.LittleEndian, msg.connId)
binary.Write(raw, binary.LittleEndian, int32(len(msg.content)))
binary.Write(raw, binary.LittleEndian, msg.content)
case connId := <-s.sendStatusCh:
binary.Write(raw, binary.LittleEndian, MUX_MSG_SEND_OK)
binary.Write(raw, binary.LittleEndian, connId)
}
if _, err := s.conn.Write(raw.Bytes()); err != nil {
s.Close()
break
}
}
}()
select {
case <-s.closeChan:
}
}
func (s *Mux) readSession() {
var buf []byte
go func() {
raw := bytes.NewBuffer([]byte{})
buf := pool.BufPoolCopy.Get().([]byte)
defer pool.PutBufPoolCopy(buf)
for {
var flag, i int32
var n int
@@ -171,24 +146,19 @@ func (s *Mux) readSession() {
}
switch flag {
case MUX_NEW_CONN: //new conn
conn := NewConn(i, s, s.sendMsgCh, s.sendStatusCh)
conn := NewConn(i, s)
s.connMap.Set(i, conn) //it has been set before send ok
s.newConnCh <- conn
raw.Reset()
binary.Write(raw, binary.LittleEndian, MUX_NEW_CONN_OK)
binary.Write(raw, binary.LittleEndian, i)
s.conn.Write(raw.Bytes())
s.sendInfo(MUX_NEW_CONN_OK, i, nil)
continue
case MUX_PING_FLAG: //ping
raw.Reset()
binary.Write(raw, binary.LittleEndian, MUX_PING_RETURN)
binary.Write(raw, binary.LittleEndian, MUX_PING)
s.conn.Write(raw.Bytes())
s.sendInfo(MUX_PING_RETURN, MUX_PING, nil)
continue
case MUX_PING_RETURN:
s.pingOk -= 1
continue
case MUX_NEW_MSG:
buf = pool.GetBufPoolCopy()
if n, err = ReadLenBytes(buf, s.conn); err != nil {
break
}
@@ -196,20 +166,36 @@ func (s *Mux) readSession() {
if conn, ok := s.connMap.Get(i); ok && !conn.isClose {
switch flag {
case MUX_NEW_MSG: //new msg from remote conn
copy(conn.readBuffer, buf[:n])
conn.endRead = n
//insert wait queue
conn.waitQueue.Push(NewBufNode(buf, n))
//judge len if >xxx ,send stop
if conn.readWait {
conn.readWait = false
conn.readCh <- struct{}{}
}
if conn.waitQueue.Size() > s.waitQueueSize {
s.sendInfo(MUX_STOP_WRITE, conn.connId, nil)
}
case MUX_STOP_WRITE:
conn.stopWrite = true
case MUX_MSG_SEND_OK: //the remote has read
conn.getStatusCh <- struct{}{}
if conn.stopWrite {
conn.stopWrite = false
select {
case conn.getStatusCh <- struct{}{}:
default:
}
}
case MUX_NEW_CONN_OK: //conn ok
conn.connStatusOkCh <- struct{}{}
case MUX_NEW_CONN_Fail:
conn.connStatusFailCh <- struct{}{}
case MUX_CONN_CLOSE: //close the connection
conn.Close()
go conn.Close()
s.connMap.Delete(i)
}
} else if flag == MUX_NEW_MSG {
pool.PutBufPoolCopy(buf)
}
} else {
break
@@ -231,9 +217,6 @@ func (s *Mux) Close() error {
s.closeChan <- struct{}{}
s.closeChan <- struct{}{}
s.closeChan <- struct{}{}
close(s.closeChan)
close(s.sendMsgCh)
close(s.sendStatusCh)
return s.conn.Close()
}

View File

@@ -2,7 +2,7 @@ package mux
import (
"github.com/cnlh/nps/lib/common"
conn3 "github.com/cnlh/nps/lib/conn"
"github.com/cnlh/nps/lib/pool"
"github.com/cnlh/nps/vender/github.com/astaxie/beego/logs"
"log"
"net"
@@ -17,7 +17,7 @@ var conn2 net.Conn
func TestNewMux(t *testing.T) {
go func() {
http.ListenAndServe("0.0.0.0:8899", nil)
http.ListenAndServe("0.0.0.0:8889", nil)
}()
logs.EnableFuncCallDepth(true)
logs.SetLogFuncCallDepth(3)
@@ -32,12 +32,12 @@ func TestNewMux(t *testing.T) {
log.Fatalln(err)
}
go func(c net.Conn) {
c2, err := net.Dial("tcp", "127.0.0.1:8080")
c2, err := net.Dial("tcp", "10.1.50.196:4000")
if err != nil {
log.Fatalln(err)
}
go common.CopyBuffer(c2, conn3.NewCryptConn(c, true, nil))
common.CopyBuffer(conn3.NewCryptConn(c, true, nil), c2)
go common.CopyBuffer(c2, c)
common.CopyBuffer(c, c2)
c.Close()
c2.Close()
}(c)
@@ -60,8 +60,8 @@ func TestNewMux(t *testing.T) {
if err != nil {
log.Fatalln(err)
}
go common.CopyBuffer(conn3.NewCryptConn(tmpCpnn, true, nil), conn)
common.CopyBuffer(conn, conn3.NewCryptConn(tmpCpnn, true, nil))
go common.CopyBuffer(tmpCpnn, conn)
common.CopyBuffer(conn, tmpCpnn)
conn.Close()
tmpCpnn.Close()
}(conn)
@@ -95,3 +95,15 @@ func client() {
log.Fatalln(err)
}
}
// TestNewConn sanity-checks the copy-buffer pool: a buffer obtained from
// pool.GetBufPoolCopy keeps its full length/capacity, and data copied
// into it can be read back. Output is logged rather than asserted.
func TestNewConn(t *testing.T) {
	buf := pool.GetBufPoolCopy()
	logs.Warn(len(buf), cap(buf))
	//b := pool.GetBufPoolCopy()
	//b[0] = 1
	//b[1] = 2
	//b[2] = 3
	b := []byte{1, 2, 3}
	logs.Warn(copy(buf[:3], b), len(buf), cap(buf))
	logs.Warn(len(buf), buf[0])
}

View File

@@ -5,10 +5,12 @@ package mux
import (
"bufio"
"bytes"
"github.com/cnlh/nps/lib/common"
"github.com/cnlh/nps/vender/github.com/astaxie/beego/logs"
"github.com/pkg/errors"
"io"
"net"
"os"
"strconv"
"strings"
"time"
@@ -59,7 +61,8 @@ func (pMux *PortMux) Start() error {
}
pMux.Listener, err = net.ListenTCP("tcp", tcpAddr)
if err != nil {
return err
logs.Error(err)
os.Exit(0)
}
go func() {
for {
@@ -105,7 +108,7 @@ func (pMux *PortMux) process(conn net.Conn) {
str = strings.Replace(str, "host:", "", -1)
str = strings.TrimSpace(str)
// Determine whether it is the same as the manager domain name
if str == pMux.managerHost {
if common.GetIpByAddr(str) == pMux.managerHost {
ch = pMux.managerConn
} else {
ch = pMux.httpConn

View File

@@ -11,7 +11,7 @@ func TestPortMux_Close(t *testing.T) {
logs.EnableFuncCallDepth(true)
logs.SetLogFuncCallDepth(3)
pMux := NewPortMux(8888)
pMux := NewPortMux(8888,"Ds")
go func() {
if pMux.Start() != nil {
logs.Warn("Error")
@@ -19,21 +19,21 @@ func TestPortMux_Close(t *testing.T) {
}()
time.Sleep(time.Second * 3)
go func() {
l := pMux.GetHttpsAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}()
go func() {
l := pMux.GetHttpAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}()
go func() {
l := pMux.GetClientAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}()
l := pMux.GetManagerAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}

82
lib/mux/queue.go Normal file
View File

@@ -0,0 +1,82 @@
package mux
import (
"errors"
"github.com/cnlh/nps/lib/pool"
"sync"
)
// Element is a queued item: a pointer to a buffer node.
type Element *bufNode

// bufNode couples a pooled buffer with the number of valid bytes in it.
type bufNode struct {
	val []byte //buf value
	l   int    //length of valid data in val
}

// NewBufNode wraps buf and its valid length l in a bufNode.
func NewBufNode(buf []byte, l int) *bufNode {
	return &bufNode{
		val: buf,
		l:   l,
	}
}

// Queue describes a FIFO of Elements.
// NOTE(review): sliceEntry.Pop returns (Element, error), so sliceEntry
// does not actually satisfy this interface — confirm it is still needed.
type Queue interface {
	Push(e Element) // append an element to the queue
	Pop() Element   // remove and return the front element
	Clear() bool    // drop all elements
	Size() int      // number of queued elements
	IsEmpty() bool  // report whether the queue is empty
}

// sliceEntry is a mutex-guarded, slice-backed FIFO queue.
type sliceEntry struct {
	element []Element
	sync.Mutex
}

// NewQueue returns an empty queue.
func NewQueue() *sliceEntry {
	return &sliceEntry{}
}

// Push appends e to the back of the queue.
func (entry *sliceEntry) Push(e Element) {
	entry.Lock()
	defer entry.Unlock()
	entry.element = append(entry.element, e)
}

// Pop removes and returns the front element, or an error when the queue
// is empty. The emptiness check now happens under the lock: checking
// before acquiring it was a check-then-act race that could slice an
// empty queue.
func (entry *sliceEntry) Pop() (Element, error) {
	entry.Lock()
	defer entry.Unlock()
	if len(entry.element) == 0 {
		return nil, errors.New("queue is empty!")
	}
	firstElement := entry.element[0]
	entry.element = entry.element[1:]
	return firstElement, nil
}

// Clear returns every queued buffer to the copy pool and empties the
// queue. It reports false when the queue was already empty. The empty
// check is done under the lock for the same race reason as Pop.
func (entry *sliceEntry) Clear() bool {
	entry.Lock()
	defer entry.Unlock()
	if len(entry.element) == 0 {
		return false
	}
	for i := 0; i < len(entry.element); i++ {
		pool.PutBufPoolCopy(entry.element[i].val)
		entry.element[i] = nil
	}
	entry.element = nil
	return true
}

// Size returns the number of queued elements.
// NOTE(review): reads the slice length without the lock; the value may
// be stale under concurrent Push/Pop — confirm callers tolerate that.
func (entry *sliceEntry) Size() int {
	return len(entry.element)
}

// IsEmpty reports whether the queue holds no elements (same locking
// caveat as Size).
func (entry *sliceEntry) IsEmpty() bool {
	return len(entry.element) == 0
}

View File

@@ -32,10 +32,10 @@ var BufPoolSmall = sync.Pool{
}
var BufPoolCopy = sync.Pool{
New: func() interface{} {
return make([]byte, PoolSizeCopy)
buf := make([]byte, PoolSizeCopy)
return &buf
},
}
func PutBufPoolUdp(buf []byte) {
if cap(buf) == PoolSizeUdp {
BufPoolUdp.Put(buf[:PoolSizeUdp])
@@ -44,10 +44,14 @@ func PutBufPoolUdp(buf []byte) {
func PutBufPoolCopy(buf []byte) {
if cap(buf) == PoolSizeCopy {
BufPoolCopy.Put(buf[:PoolSizeCopy])
BufPoolCopy.Put(&buf)
}
}
// GetBufPoolCopy fetches a copy buffer from BufPoolCopy, re-sliced to
// its full PoolSizeCopy length so callers always receive a fixed-size
// buffer regardless of how it was returned to the pool.
func GetBufPoolCopy() []byte { // gofmt: no parentheses around a single return type
	return (*BufPoolCopy.Get().(*[]byte))[:PoolSizeCopy]
}
func PutBufPoolSmall(buf []byte) {
if cap(buf) == PoolSizeSmall {
BufPoolSmall.Put(buf[:PoolSizeSmall])