MUX optimization

刘河
2019-03-15 14:03:49 +08:00
parent f78e81b452
commit 97330bfbdc
33 changed files with 749 additions and 328 deletions

lib/mux/conn.go View File

@@ -5,6 +5,7 @@ import (
"github.com/cnlh/nps/lib/pool"
"io"
"net"
"sync"
"time"
)
@@ -15,78 +16,76 @@ type conn struct {
connStatusFailCh chan struct{}
readTimeOut time.Time
writeTimeOut time.Time
sendMsgCh chan *msg //mux
sendStatusCh chan int32 //mux
readBuffer []byte
startRead int //current read offset
endRead int //end of valid data in the read buffer
readFlag bool
readCh chan struct{}
waitQueue *sliceEntry
stopWrite bool
connId int32
isClose bool
readWait bool
mux *Mux
}
type msg struct {
connId int32
content []byte
}
var connPool = sync.Pool{}
func NewMsg(connId int32, content []byte) *msg {
return &msg{
connId: connId,
content: content,
}
}
func NewConn(connId int32, mux *Mux, sendMsgCh chan *msg, sendStatusCh chan int32) *conn {
return &conn{
func NewConn(connId int32, mux *Mux) *conn {
c := &conn{
readCh: make(chan struct{}),
readBuffer: pool.BufPoolCopy.Get().([]byte),
getStatusCh: make(chan struct{}),
connStatusOkCh: make(chan struct{}),
connStatusFailCh: make(chan struct{}),
readTimeOut: time.Time{},
writeTimeOut: time.Time{},
sendMsgCh: sendMsgCh,
sendStatusCh: sendStatusCh,
waitQueue: NewQueue(),
connId: connId,
isClose: false,
mux: mux,
}
return c
}
func (s *conn) Read(buf []byte) (n int, err error) {
if s.isClose {
if s.isClose || buf == nil {
return 0, errors.New("the conn has closed")
}
if s.endRead-s.startRead == 0 {
s.readWait = true
if t := s.readTimeOut.Sub(time.Now()); t > 0 {
timer := time.NewTimer(t)
select {
case <-timer.C:
s.readWait = false
return 0, errors.New("read timeout")
case <-s.readCh:
if s.endRead-s.startRead == 0 { //the read buffer is drained (or this is the first read)
if s.waitQueue.Size() == 0 {
s.readWait = true
if t := s.readTimeOut.Sub(time.Now()); t > 0 {
timer := time.NewTimer(t)
defer timer.Stop()
select {
case <-timer.C:
s.readWait = false
return 0, errors.New("read timeout")
case <-s.readCh:
}
} else {
<-s.readCh
}
} else {
<-s.readCh
}
}
s.readWait = false
if s.isClose {
return 0, io.EOF
if s.isClose { //the conn was closed while we waited; return instead of continuing
return 0, errors.New("the conn has closed")
}
if node, err := s.waitQueue.Pop(); err != nil {
s.Close()
return 0, io.EOF
} else {
pool.PutBufPoolCopy(s.readBuffer)
s.readBuffer = node.val
s.endRead = node.l
s.startRead = 0
}
}
if len(buf) < s.endRead-s.startRead {
n = copy(buf, s.readBuffer[s.startRead:s.startRead+len(buf)])
s.startRead += n
} else {
n = copy(buf, s.readBuffer[s.startRead:s.endRead])
s.startRead = 0
s.endRead = 0
s.sendStatusCh <- s.connId
s.startRead += n
if s.waitQueue.Size() < s.mux.waitQueueSize/2 {
s.mux.sendInfo(MUX_MSG_SEND_OK, s.connId, nil)
}
}
return
}
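The half-size check above is the receive side of the new flow control: the reader only tells the peer to continue (MUX_MSG_SEND_OK) once its wait queue has drained below half of waitQueueSize, while readSession sends MUX_STOP_WRITE once the backlog exceeds the limit. A minimal, self-contained sketch of that hysteresis (constant and names are illustrative, not the nps API):

package main

import "fmt"

const waitQueueSize = 10 // mirrors Mux.waitQueueSize in this commit

// Hysteresis used by the new flow control: the receiver asks the peer
// to stop once the backlog exceeds the limit, and to resume only after
// it has drained below half of it, so the two sides do not ping-pong
// stop/ok messages on every frame.
func stop(queued int) bool   { return queued > waitQueueSize }
func resume(queued int) bool { return queued < waitQueueSize/2 }

func main() {
	for _, q := range []int{0, 4, 5, 10, 11} {
		fmt.Printf("queued=%2d stop=%5v resume=%v\n", q, stop(q), resume(q))
	}
}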
@@ -99,6 +98,7 @@ func (s *conn) Write(buf []byte) (int, error) {
go s.write(buf, ch)
if t := s.writeTimeOut.Sub(time.Now()); t > 0 {
timer := time.NewTimer(t)
defer timer.Stop()
select {
case <-timer.C:
return 0, errors.New("write timeout")
@@ -112,18 +112,18 @@ func (s *conn) Write(buf []byte) (int, error) {
}
return len(buf), nil
}
func (s *conn) write(buf []byte, ch chan struct{}) {
start := 0
l := len(buf)
for {
if s.stopWrite {
<-s.getStatusCh
}
if l-start > pool.PoolSizeCopy {
s.sendMsgCh <- NewMsg(s.connId, buf[start:start+pool.PoolSizeCopy])
s.mux.sendInfo(MUX_NEW_MSG, s.connId, buf[start:start+pool.PoolSizeCopy])
start += pool.PoolSizeCopy
<-s.getStatusCh
} else {
s.sendMsgCh <- NewMsg(s.connId, buf[start:l])
<-s.getStatusCh
s.mux.sendInfo(MUX_NEW_MSG, s.connId, buf[start:l])
break
}
}
@@ -131,18 +131,30 @@ func (s *conn) write(buf []byte, ch chan struct{}) {
}
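For reference, the loop above frames a payload into pool-sized chunks so each chunk fits a pooled buffer on the far side. A standalone sketch of the same chunking rule (poolSizeCopy is a stand-in value; the real pool.PoolSizeCopy may differ):

package main

import "fmt"

const poolSizeCopy = 4096 // stand-in for pool.PoolSizeCopy

// chunk splits buf the way conn.write does: full poolSizeCopy frames
// first, then one final frame carrying the remainder.
func chunk(buf []byte) [][]byte {
	var frames [][]byte
	start := 0
	for {
		if len(buf)-start > poolSizeCopy {
			frames = append(frames, buf[start:start+poolSizeCopy])
			start += poolSizeCopy
		} else {
			frames = append(frames, buf[start:])
			break
		}
	}
	return frames
}

func main() {
	fmt.Println(len(chunk(make([]byte, 10000)))) // 3 frames: 4096 + 4096 + 1808
}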
func (s *conn) Close() error {
if s.isClose {
return errors.New("the conn has closed")
}
times := 0
retry:
if s.waitQueue.Size() > 0 && times < 600 {
time.Sleep(time.Millisecond * 100)
times++
goto retry
}
if s.isClose {
return errors.New("the conn has closed")
}
s.isClose = true
pool.PutBufPoolCopy(s.readBuffer)
close(s.getStatusCh)
close(s.connStatusOkCh)
close(s.connStatusFailCh)
close(s.readCh)
if !s.mux.IsClose {
s.sendMsgCh <- NewMsg(s.connId, nil)
if s.readWait {
s.readCh <- struct{}{}
}
s.waitQueue.Clear()
s.mux.connMap.Delete(s.connId)
if !s.mux.IsClose {
s.mux.sendInfo(MUX_CONN_CLOSE, s.connId, nil)
}
connPool.Put(s)
return nil
}

lib/mux/map.go View File

@@ -44,6 +44,12 @@ func (s *connMap) Close() {
s.closeCh <- struct{}{}
}
func (s *connMap) Delete(id int32) {
s.Lock()
defer s.Unlock()
delete(s.connMap, id)
}
func (s *connMap) clean() {
ticker := time.NewTimer(time.Minute * 1)
for {

lib/mux/mux.go View File

@@ -22,38 +22,35 @@ const (
MUX_PING
MUX_CONN_CLOSE
MUX_PING_RETURN
MUX_STOP_WRITE
RETRY_TIME = 2 //number of unanswered heartbeats tolerated before the mux is closed
)
type Mux struct {
net.Listener
conn net.Conn
connMap *connMap
sendMsgCh chan *msg //write msg chan
sendStatusCh chan int32 //write read ok chan
newConnCh chan *conn
id int32
closeChan chan struct{}
IsClose bool
pingOk int
conn net.Conn
connMap *connMap
newConnCh chan *conn
id int32
closeChan chan struct{}
IsClose bool
pingOk int
waitQueueSize int
sync.Mutex
}
func NewMux(c net.Conn) *Mux {
m := &Mux{
conn: c,
connMap: NewConnMap(),
sendMsgCh: make(chan *msg),
sendStatusCh: make(chan int32),
id: 0,
closeChan: make(chan struct{}),
newConnCh: make(chan *conn),
IsClose: false,
conn: c,
connMap: NewConnMap(),
id: 0,
closeChan: make(chan struct{}),
newConnCh: make(chan *conn),
IsClose: false,
waitQueueSize: 10, //TODO: to be more efficient, this value could be generated dynamically from a delay algorithm.
}
//read session by flag
go m.readSession()
//write session
go m.writeSession()
//ping
go m.ping()
return m
@@ -63,7 +60,7 @@ func (s *Mux) NewConn() (*conn, error) {
if s.IsClose {
return nil, errors.New("the mux has closed")
}
conn := NewConn(s.getId(), s, s.sendMsgCh, s.sendStatusCh)
conn := NewConn(s.getId(), s)
raw := bytes.NewBuffer([]byte{})
if err := binary.Write(raw, binary.LittleEndian, MUX_NEW_CONN); err != nil {
return nil, err
@@ -76,10 +73,14 @@ func (s *Mux) NewConn() (*conn, error) {
if _, err := s.conn.Write(raw.Bytes()); err != nil {
return nil, err
}
//set a 30-second timeout for the handshake
timer := time.NewTimer(time.Second * 30)
defer timer.Stop()
select {
case <-conn.connStatusOkCh:
return conn, nil
case <-conn.connStatusFailCh:
case <-timer.C:
}
return nil, errors.New("create connection failthe server refused the connection")
}
@@ -95,10 +96,24 @@ func (s *Mux) Addr() net.Addr {
return s.conn.LocalAddr()
}
func (s *Mux) sendInfo(flag int32, id int32, content []byte) error {
raw := bytes.NewBuffer([]byte{})
binary.Write(raw, binary.LittleEndian, flag)
binary.Write(raw, binary.LittleEndian, id)
if content != nil && len(content) > 0 {
binary.Write(raw, binary.LittleEndian, int32(len(content)))
binary.Write(raw, binary.LittleEndian, content)
}
if _, err := s.conn.Write(raw.Bytes()); err != nil || s.pingOk > RETRY_TIME {
s.Close()
return err
}
return nil
}
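sendInfo now fixes the wire format in one place: a little-endian int32 flag, an int32 connection id, and, for data frames only, an int32 length followed by the payload. A round-trip sketch of that framing (a sketch under stated assumptions, not the nps implementation; flag values are arbitrary):

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// encodeFrame mirrors Mux.sendInfo: flag, id, then an optional
// length-prefixed payload, all little-endian.
func encodeFrame(flag, id int32, content []byte) []byte {
	raw := new(bytes.Buffer)
	binary.Write(raw, binary.LittleEndian, flag)
	binary.Write(raw, binary.LittleEndian, id)
	if len(content) > 0 {
		binary.Write(raw, binary.LittleEndian, int32(len(content)))
		raw.Write(content)
	}
	return raw.Bytes()
}

// decodeFrame reads one frame back. The real readSession knows from the
// flag whether a payload follows; this sketch just tries to read one.
func decodeFrame(r io.Reader) (flag, id int32, content []byte, err error) {
	if err = binary.Read(r, binary.LittleEndian, &flag); err != nil {
		return
	}
	if err = binary.Read(r, binary.LittleEndian, &id); err != nil {
		return
	}
	var l int32
	if err = binary.Read(r, binary.LittleEndian, &l); err != nil {
		return flag, id, nil, nil // control frame without payload
	}
	content = make([]byte, l)
	_, err = io.ReadFull(r, content)
	return
}

func main() {
	frame := encodeFrame(1, 42, []byte("hello"))
	flag, id, content, _ := decodeFrame(bytes.NewReader(frame))
	fmt.Println(flag, id, string(content)) // 1 42 hello
}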
func (s *Mux) ping() {
go func() {
ticker := time.NewTicker(time.Second * 5)
raw := bytes.NewBuffer([]byte{})
for {
select {
case <-ticker.C:
@@ -107,11 +122,7 @@ func (s *Mux) ping() {
if (math.MaxInt32 - s.id) < 10000 {
s.id = 0
}
raw.Reset()
binary.Write(raw, binary.LittleEndian, MUX_PING_FLAG)
binary.Write(raw, binary.LittleEndian, MUX_PING)
if _, err := s.conn.Write(raw.Bytes()); err != nil || s.pingOk > RETRY_TIME {
s.Close()
if err := s.sendInfo(MUX_PING_FLAG, MUX_PING, nil); err != nil || s.pingOk > RETRY_TIME {
break
}
s.pingOk += 1
@@ -122,45 +133,9 @@ func (s *Mux) ping() {
}
}
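The rewritten ping loop delegates writing to sendInfo and keeps only the liveness accounting: every ping sent increments pingOk, every MUX_PING_RETURN decrements it, and the mux closes once more than RETRY_TIME pings are outstanding. A schematic of that counter (illustrative names, not the nps API):

package main

import "fmt"

const retryTime = 2 // mirrors RETRY_TIME: tolerated unanswered pings

type heartbeat struct{ pending int }

// sent is called on every ticker tick; it reports whether the session
// is still considered alive before recording the new ping.
func (h *heartbeat) sent() bool {
	if h.pending > retryTime {
		return false // too many unanswered pings: close the mux
	}
	h.pending++
	return true
}

// returned is called when a MUX_PING_RETURN frame arrives.
func (h *heartbeat) returned() { h.pending-- }

func main() {
	h := &heartbeat{}
	for i := 0; i < 5; i++ {
		fmt.Println("tick", i, "alive:", h.sent()) // turns false at tick 3
	}
	h.returned() // one answered ping re-arms the counter
	fmt.Println("after return, alive:", h.sent())
}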
func (s *Mux) writeSession() {
go func() {
raw := bytes.NewBuffer([]byte{})
for {
raw.Reset()
select {
case msg := <-s.sendMsgCh:
if msg == nil {
break
}
if msg.content == nil { //close
binary.Write(raw, binary.LittleEndian, MUX_CONN_CLOSE)
binary.Write(raw, binary.LittleEndian, msg.connId)
break
}
binary.Write(raw, binary.LittleEndian, MUX_NEW_MSG)
binary.Write(raw, binary.LittleEndian, msg.connId)
binary.Write(raw, binary.LittleEndian, int32(len(msg.content)))
binary.Write(raw, binary.LittleEndian, msg.content)
case connId := <-s.sendStatusCh:
binary.Write(raw, binary.LittleEndian, MUX_MSG_SEND_OK)
binary.Write(raw, binary.LittleEndian, connId)
}
if _, err := s.conn.Write(raw.Bytes()); err != nil {
s.Close()
break
}
}
}()
select {
case <-s.closeChan:
}
}
func (s *Mux) readSession() {
var buf []byte
go func() {
raw := bytes.NewBuffer([]byte{})
buf := pool.BufPoolCopy.Get().([]byte)
defer pool.PutBufPoolCopy(buf)
for {
var flag, i int32
var n int
@@ -171,24 +146,19 @@ func (s *Mux) readSession() {
}
switch flag {
case MUX_NEW_CONN: //new conn
conn := NewConn(i, s, s.sendMsgCh, s.sendStatusCh)
conn := NewConn(i, s)
s.connMap.Set(i, conn) //store the conn in the map before the ok is sent back
s.newConnCh <- conn
raw.Reset()
binary.Write(raw, binary.LittleEndian, MUX_NEW_CONN_OK)
binary.Write(raw, binary.LittleEndian, i)
s.conn.Write(raw.Bytes())
s.sendInfo(MUX_NEW_CONN_OK, i, nil)
continue
case MUX_PING_FLAG: //ping
raw.Reset()
binary.Write(raw, binary.LittleEndian, MUX_PING_RETURN)
binary.Write(raw, binary.LittleEndian, MUX_PING)
s.conn.Write(raw.Bytes())
s.sendInfo(MUX_PING_RETURN, MUX_PING, nil)
continue
case MUX_PING_RETURN:
s.pingOk -= 1
continue
case MUX_NEW_MSG:
buf = pool.GetBufPoolCopy()
if n, err = ReadLenBytes(buf, s.conn); err != nil {
break
}
@@ -196,20 +166,36 @@ func (s *Mux) readSession() {
if conn, ok := s.connMap.Get(i); ok && !conn.isClose {
switch flag {
case MUX_NEW_MSG: //new msg from remote conn
copy(conn.readBuffer, buf[:n])
conn.endRead = n
//push the incoming frame onto the wait queue
conn.waitQueue.Push(NewBufNode(buf, n))
//if the backlog exceeds the limit, tell the peer to stop writing
if conn.readWait {
conn.readWait = false
conn.readCh <- struct{}{}
}
if conn.waitQueue.Size() > s.waitQueueSize {
s.sendInfo(MUX_STOP_WRITE, conn.connId, nil)
}
case MUX_STOP_WRITE:
conn.stopWrite = true
case MUX_MSG_SEND_OK: //the remote has read
conn.getStatusCh <- struct{}{}
if conn.stopWrite {
conn.stopWrite = false
select {
case conn.getStatusCh <- struct{}{}:
default:
}
}
case MUX_NEW_CONN_OK: //conn ok
conn.connStatusOkCh <- struct{}{}
case MUX_NEW_CONN_Fail:
conn.connStatusFailCh <- struct{}{}
case MUX_CONN_CLOSE: //close the connection
conn.Close()
go conn.Close()
s.connMap.Delete(i)
}
} else if flag == MUX_NEW_MSG {
pool.PutBufPoolCopy(buf)
}
} else {
break
@@ -231,9 +217,6 @@ func (s *Mux) Close() error {
s.closeChan <- struct{}{}
s.closeChan <- struct{}{}
s.closeChan <- struct{}{}
close(s.closeChan)
close(s.sendMsgCh)
close(s.sendStatusCh)
return s.conn.Close()
}
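One subtle detail in readSession above: the MUX_MSG_SEND_OK resume signal is delivered with a select/default, so the read loop never blocks when no writer is currently parked on getStatusCh. The pattern in isolation (buffered here for demonstration; the channel in the diff is unbuffered, so the send succeeds only when a writer is already waiting):

package main

import "fmt"

// notify wakes a parked writer if one is there and is a no-op
// otherwise, instead of blocking the caller.
func notify(ch chan struct{}) bool {
	select {
	case ch <- struct{}{}:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan struct{}, 1)
	fmt.Println(notify(ch)) // true: the signal is parked in the buffer
	fmt.Println(notify(ch)) // false: nobody consumed the first signal
}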

lib/mux/mux_test.go View File

@@ -2,7 +2,7 @@ package mux
import (
"github.com/cnlh/nps/lib/common"
conn3 "github.com/cnlh/nps/lib/conn"
"github.com/cnlh/nps/lib/pool"
"github.com/cnlh/nps/vender/github.com/astaxie/beego/logs"
"log"
"net"
@@ -17,7 +17,7 @@ var conn2 net.Conn
func TestNewMux(t *testing.T) {
go func() {
http.ListenAndServe("0.0.0.0:8899", nil)
http.ListenAndServe("0.0.0.0:8889", nil)
}()
logs.EnableFuncCallDepth(true)
logs.SetLogFuncCallDepth(3)
@@ -32,12 +32,12 @@ func TestNewMux(t *testing.T) {
log.Fatalln(err)
}
go func(c net.Conn) {
c2, err := net.Dial("tcp", "127.0.0.1:8080")
c2, err := net.Dial("tcp", "10.1.50.196:4000")
if err != nil {
log.Fatalln(err)
}
go common.CopyBuffer(c2, conn3.NewCryptConn(c, true, nil))
common.CopyBuffer(conn3.NewCryptConn(c, true, nil), c2)
go common.CopyBuffer(c2, c)
common.CopyBuffer(c, c2)
c.Close()
c2.Close()
}(c)
@@ -60,8 +60,8 @@ func TestNewMux(t *testing.T) {
if err != nil {
log.Fatalln(err)
}
go common.CopyBuffer(conn3.NewCryptConn(tmpCpnn, true, nil), conn)
common.CopyBuffer(conn, conn3.NewCryptConn(tmpCpnn, true, nil))
go common.CopyBuffer(tmpCpnn, conn)
common.CopyBuffer(conn, tmpCpnn)
conn.Close()
tmpCpnn.Close()
}(conn)
@@ -95,3 +95,15 @@ func client() {
log.Fatalln(err)
}
}
func TestNewConn(t *testing.T) {
buf := pool.GetBufPoolCopy()
logs.Warn(len(buf), cap(buf))
//b := pool.GetBufPoolCopy()
//b[0] = 1
//b[1] = 2
//b[2] = 3
b := []byte{1, 2, 3}
logs.Warn(copy(buf[:3], b), len(buf), cap(buf))
logs.Warn(len(buf), buf[0])
}

lib/mux/pmux.go View File

@@ -5,10 +5,12 @@ package mux
import (
"bufio"
"bytes"
"github.com/cnlh/nps/lib/common"
"github.com/cnlh/nps/vender/github.com/astaxie/beego/logs"
"github.com/pkg/errors"
"io"
"net"
"os"
"strconv"
"strings"
"time"
@@ -59,7 +61,8 @@ func (pMux *PortMux) Start() error {
}
pMux.Listener, err = net.ListenTCP("tcp", tcpAddr)
if err != nil {
return err
logs.Error(err)
os.Exit(0)
}
go func() {
for {
@@ -105,7 +108,7 @@ func (pMux *PortMux) process(conn net.Conn) {
str = strings.Replace(str, "host:", "", -1)
str = strings.TrimSpace(str)
// Determine whether it is the same as the manager domain name
if str == pMux.managerHost {
if common.GetIpByAddr(str) == pMux.managerHost {
ch = pMux.managerConn
} else {
ch = pMux.httpConn
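The manager match above now normalizes the Host header first; common.GetIpByAddr presumably strips any ":port" suffix so "example.com:8080" still matches a managerHost of "example.com". A minimal equivalent of that normalization (hostOnly is a hypothetical helper, not the nps implementation):

package main

import (
	"fmt"
	"net"
	"strings"
)

// hostOnly drops an optional :port from a Host header value, the kind
// of normalization common.GetIpByAddr appears to perform here.
func hostOnly(addr string) string {
	if host, _, err := net.SplitHostPort(addr); err == nil {
		return host
	}
	return strings.TrimSpace(addr) // no port present
}

func main() {
	fmt.Println(hostOnly("example.com:8080")) // example.com
	fmt.Println(hostOnly("example.com"))      // example.com
}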

lib/mux/pmux_test.go View File

@@ -11,7 +11,7 @@ func TestPortMux_Close(t *testing.T) {
logs.EnableFuncCallDepth(true)
logs.SetLogFuncCallDepth(3)
pMux := NewPortMux(8888)
pMux := NewPortMux(8888, "Ds")
go func() {
if pMux.Start() != nil {
logs.Warn("Error")
@@ -19,21 +19,21 @@ func TestPortMux_Close(t *testing.T) {
}()
time.Sleep(time.Second * 3)
go func() {
l := pMux.GetHttpsAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}()
go func() {
l := pMux.GetHttpAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}()
go func() {
l := pMux.GetClientAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}()
l := pMux.GetManagerAccept()
l := pMux.GetHttpListener()
conn, err := l.Accept()
logs.Warn(conn, err)
}

lib/mux/queue.go (new file, 82 additions) View File

@@ -0,0 +1,82 @@
package mux
import (
"errors"
"github.com/cnlh/nps/lib/pool"
"sync"
)
type Element *bufNode
type bufNode struct {
val []byte //buf value
l int //length
}
func NewBufNode(buf []byte, l int) *bufNode {
return &bufNode{
val: buf,
l: l,
}
}
type Queue interface {
Push(e Element) //append an element to the queue
Pop() Element //remove and return the first element
Clear() bool //empty the queue
Size() int //number of elements in the queue
IsEmpty() bool //report whether the queue is empty
}
type sliceEntry struct {
element []Element
sync.Mutex
}
func NewQueue() *sliceEntry {
return &sliceEntry{}
}
//append an element to the queue
func (entry *sliceEntry) Push(e Element) {
entry.Lock()
defer entry.Unlock()
entry.element = append(entry.element, e)
}
//remove the first element from the queue
func (entry *sliceEntry) Pop() (Element, error) {
if entry.IsEmpty() {
return nil, errors.New("queue is empty")
}
entry.Lock()
defer entry.Unlock()
firstElement := entry.element[0]
entry.element = entry.element[1:]
return firstElement, nil
}
func (entry *sliceEntry) Clear() bool {
entry.Lock()
defer entry.Unlock()
if entry.IsEmpty() {
return false
}
for i := 0; i < entry.Size(); i++ {
pool.PutBufPoolCopy(entry.element[i].val)
entry.element[i] = nil
}
entry.element = nil
return true
}
func (entry *sliceEntry) Size() int {
return len(entry.element)
}
func (entry *sliceEntry) IsEmpty() bool {
if len(entry.element) == 0 {
return true
}
return false
}
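queue.go gives each conn a lock-protected FIFO of pooled read buffers. One thing worth noting: Pop calls IsEmpty before taking the lock, so two concurrent Pop calls can both pass the check while only one element remains. A sketch of the same shape with the check moved under the lock (self-contained, without the buffer pool):

package main

import (
	"errors"
	"fmt"
	"sync"
)

// fifo mirrors sliceEntry: a mutex-guarded slice used as a queue.
type fifo struct {
	sync.Mutex
	elems [][]byte
}

func (q *fifo) Push(b []byte) {
	q.Lock()
	defer q.Unlock()
	q.elems = append(q.elems, b)
}

func (q *fifo) Pop() ([]byte, error) {
	q.Lock()
	defer q.Unlock()
	if len(q.elems) == 0 { // emptiness checked under the lock
		return nil, errors.New("queue is empty")
	}
	first := q.elems[0]
	q.elems = q.elems[1:]
	return first, nil
}

func main() {
	q := &fifo{}
	q.Push([]byte("a"))
	q.Push([]byte("b"))
	b, _ := q.Pop()
	fmt.Println(string(b)) // a
}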