From a9d4a4c50cdfa886e89f6ceb850ccd6992602890 Mon Sep 17 00:00:00 2001
From: Xargin
Date: Tue, 14 Aug 2018 14:48:30 +0800
Subject: [PATCH] update d conf

---
 ch6-cloud/ch6-06-config.md | 75 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/ch6-cloud/ch6-06-config.md b/ch6-cloud/ch6-06-config.md
index 3c757a4..02e633f 100644
--- a/ch6-cloud/ch6-06-config.md
+++ b/ch6-cloud/ch6-06-config.md
@@ -48,6 +48,81 @@
 go func() {
 }()
 ```
+### Putting it all together
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "golang.org/x/net/context"
+
+    "github.com/coreos/etcd/client"
+)
+
+// watchAndUpdate, set and get are placeholders to be filled in later;
+// main below exercises the etcd client API directly.
+func watchAndUpdate() {
+}
+
+func set() error {
+    return nil
+}
+
+func get() (string, error) {
+    return "", nil
+}
+
+func main() {
+    cfg := client.Config{
+        Endpoints:               []string{"http://127.0.0.1:2379"},
+        Transport:               client.DefaultTransport,
+        HeaderTimeoutPerRequest: time.Second,
+    }
+
+    // connect to the local etcd and get a keys API handle
+    c, err := client.New(cfg)
+    if err != nil {
+        log.Fatal(err)
+    }
+    kapi := client.NewKeysAPI(c)
+
+    // watch /name and print every new value pushed by etcd
+    w := kapi.Watcher("/name", nil)
+    go func() {
+        for {
+            resp, err := w.Next(context.Background())
+            if err != nil {
+                log.Println("watch error:", err)
+                continue
+            }
+            fmt.Println("new value is", resp.Node.Value)
+        }
+    }()
+
+    log.Print("Setting /name to alex")
+    resp, err := kapi.Set(context.Background(), "/name", "alex", nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("Set is done. Metadata is %q\n", resp)
+
+    log.Print("Getting /name key value")
+    resp, err = kapi.Get(context.Background(), "/name", nil)
+    if err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("Get is done. Metadata is %q\n", resp)
+    log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+
+    // keep the process alive so the watcher goroutine keeps printing updates
+    time.Sleep(time.Minute)
+}
+```
+
 ### When the configuration grows
 
 As the business grows, the load on the configuration system itself is likely to keep climbing: the configuration files may number in the thousands, and the clients in the tens of thousands, at which point storing the configuration content inside etcd is no longer a good fit. As the number of configuration files balloons, beyond the throughput limits of the storage system itself there is also the problem of managing the configuration data: we need access control over individual configurations, and we need to partition the configuration store into clusters according to business volume. And if there are so many clients that the store cannot withstand the instantaneous surge of QPS, we may also need caching on the client side, and so on.
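The closing paragraph only names client-side caching as one mitigation; here is a minimal sketch of that idea. The `ConfigCache` type, the TTL, and the `fetch` callback are illustrative assumptions rather than part of the patch; in practice `fetch` would call into etcd (for example `kapi.Get`) or whatever configuration service sits in front of it.

```go
package main

import (
    "fmt"
    "sync"
    "time"
)

// cachedValue is one configuration entry plus the time it was fetched.
type cachedValue struct {
    value     string
    fetchedAt time.Time
}

// ConfigCache answers repeated reads of the same key from memory for a
// short TTL, so bursts of client QPS do not all land on the config store.
type ConfigCache struct {
    mu    sync.Mutex
    ttl   time.Duration
    data  map[string]cachedValue
    fetch func(key string) (string, error) // hits etcd / the config service
}

func NewConfigCache(ttl time.Duration, fetch func(string) (string, error)) *ConfigCache {
    return &ConfigCache{
        ttl:   ttl,
        data:  make(map[string]cachedValue),
        fetch: fetch,
    }
}

// Get returns the cached value while it is still fresh; otherwise it asks
// the backing store and refreshes the cache entry.
func (c *ConfigCache) Get(key string) (string, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    if v, ok := c.data[key]; ok && time.Since(v.fetchedAt) < c.ttl {
        return v.value, nil
    }

    val, err := c.fetch(key)
    if err != nil {
        return "", err
    }
    c.data[key] = cachedValue{value: val, fetchedAt: time.Now()}
    return val, nil
}

func main() {
    cache := NewConfigCache(30*time.Second, func(key string) (string, error) {
        // stand-in for a real lookup, e.g. kapi.Get(context.Background(), key, nil)
        return "value-of-" + key, nil
    })

    v1, _ := cache.Get("/name") // first read goes to the (fake) store
    v2, _ := cache.Get("/name") // second read within the TTL is served from memory
    fmt.Println(v1, v2)
}
```

Holding the mutex across the fetch keeps the sketch short; a production cache would usually fetch outside the lock, or deduplicate concurrent fetches, so that one slow key does not block reads of every other key.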