author | robot-piglet <robot-piglet@yandex-team.com> | 2023-12-02 01:45:21 +0300 |
---|---|---|
committer | robot-piglet <robot-piglet@yandex-team.com> | 2023-12-02 02:42:50 +0300 |
commit | 9c43d58f75cf086b744cf4fe2ae180e8f37e4a0c (patch) | |
tree | 9f88a486917d371d099cd712efd91b4c122d209d /vendor | |
parent | 32fb6dda1feb24f9ab69ece5df0cb9ec238ca5e6 (diff) | |
download | ydb-9c43d58f75cf086b744cf4fe2ae180e8f37e4a0c.tar.gz | |
Intermediate changes
Diffstat (limited to 'vendor')
47 files changed, 15765 insertions, 0 deletions
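Two of the vendored packages below benefit from a quick orientation. `go-rendezvous` implements rendezvous (highest-random-weight) hashing, which go-redis v8 uses to pick shards in its Ring client. As a minimal usage sketch (the shard names are hypothetical, and the FNV-1a hash is just one reasonable choice of `Hasher`, not something the package prescribes):

```go
package main

import (
	"fmt"
	"hash/fnv"

	"github.com/dgryski/go-rendezvous"
)

// fnvHash adapts the standard library's FNV-1a hash to the
// rendezvous.Hasher signature; any uniform 64-bit string hash works.
func fnvHash(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	// Hypothetical shard addresses.
	r := rendezvous.New([]string{"shard-a:6379", "shard-b:6379", "shard-c:6379"}, fnvHash)

	// Every caller hashing the same key gets the same shard, and removing
	// a shard only remaps the keys that were assigned to it.
	fmt.Println(r.Lookup("user:1042")) // e.g. "shard-b:6379"

	r.Add("shard-d:6379")
	r.Remove("shard-a:6379")
	fmt.Println(r.Lookup("user:1042"))
}
```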
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 0000000000..7a6f8203c6
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,81 @@
+package rendezvous
+
+type Rendezvous struct {
+	nodes map[string]int
+	nstr  []string
+	nhash []uint64
+	hash  Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+	r := &Rendezvous{
+		nodes: make(map[string]int, len(nodes)),
+		nstr:  make([]string, len(nodes)),
+		nhash: make([]uint64, len(nodes)),
+		hash:  hash,
+	}
+
+	for i, n := range nodes {
+		r.nodes[n] = i
+		r.nstr[i] = n
+		r.nhash[i] = hash(n)
+	}
+
+	return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+	// short-circuit if we're empty
+	if len(r.nodes) == 0 {
+		return ""
+	}
+
+	khash := r.hash(k)
+
+	var midx int
+	var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+	for i, nhash := range r.nhash[1:] {
+		if h := xorshiftMult64(khash ^ nhash); h > mhash {
+			midx = i + 1
+			mhash = h
+		}
+	}
+
+	return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+	r.nodes[node] = len(r.nstr)
+	r.nstr = append(r.nstr, node)
+	r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+	// find index of node to remove
+	nidx := r.nodes[node]
+
+	// swap the last entry into the removed slot, then truncate
+	// (the original indexed r.nstr[len(r.nstr)], one past the end)
+	l := len(r.nstr) - 1
+	r.nstr[nidx] = r.nstr[l]
+	r.nstr = r.nstr[:l]
+
+	r.nhash[nidx] = r.nhash[l]
+	r.nhash = r.nhash[:l]
+
+	// update the map
+	delete(r.nodes, node)
+	if nidx < l { // a node was actually moved into the vacated slot
+		r.nodes[r.nstr[nidx]] = nidx
+	}
+}
+
+func xorshiftMult64(x uint64) uint64 {
+	x ^= x >> 12 // a
+	x ^= x << 25 // b
+	x ^= x >> 27 // c
+	return x * 2685821657736338717
+}
diff --git a/vendor/github.com/dgryski/go-rendezvous/ya.make b/vendor/github.com/dgryski/go-rendezvous/ya.make
new file mode 100644
index 0000000000..7cc1e13a7d
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/ya.make
@@ -0,0 +1,11 @@
+GO_LIBRARY()
+
+LICENSE(MIT)
+
+SRCS(rdv.go)
+
+GO_TEST_SRCS(rdv_test.go)
+
+END()
+
+RECURSE(gotest)
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
new file mode 100644
index 0000000000..a54f2f37ed
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -0,0 +1,1750 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"math"
+	"net"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+	// A seed list of host:port addresses of cluster nodes.
+	Addrs []string
+
+	// NewClient creates a cluster node client with provided name and options.
+	NewClient func(opt *Options) *Client
+
+	// The maximum number of retries before giving up. Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 3 retries.
+	MaxRedirects int
+
+	// Enables read-only commands on slave nodes.
+	ReadOnly bool
+	// Allows routing read-only commands to the closest master or slave node.
+	// It automatically enables ReadOnly.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+ // It automatically enables ReadOnly. + RouteRandomly bool + + // Optional function that returns cluster slots information. + // It is useful to manually create cluster of standalone Redis servers + // and load-balance read/write operations between master and slaves. + // It can use service like ZooKeeper to maintain configuration information + // and Cluster.ReloadState to manually trigger state reloading. + ClusterSlots func(context.Context) ([]ClusterSlot, error) + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). + PoolFIFO bool + + // PoolSize applies per cluster node and not for the whole cluster. + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *ClusterOptions) init() { + if opt.MaxRedirects == -1 { + opt.MaxRedirects = 0 + } else if opt.MaxRedirects == 0 { + opt.MaxRedirects = 3 + } + + if opt.RouteByLatency || opt.RouteRandomly { + opt.ReadOnly = true + } + + if opt.PoolSize == 0 { + opt.PoolSize = 5 * runtime.GOMAXPROCS(0) + } + + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + + if opt.MaxRetries == 0 { + opt.MaxRetries = -1 + } + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } + + if opt.NewClient == nil { + opt.NewClient = NewClient + } +} + +func (opt *ClusterOptions) clientOptions() *Options { + const disableIdleCheck = -1 + + return &Options{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Username: opt.Username, + Password: opt.Password, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: disableIdleCheck, + + TLSConfig: opt.TLSConfig, + // If ClusterSlots is populated, then we probably have an artificial + // cluster whose nodes are not in clustering mode (otherwise there isn't + // much use for ClusterSlots config). This means we cannot execute the + // READONLY command against that node -- setting readOnly to false in such + // situations in the options below will prevent that from happening. 
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + } +} + +//------------------------------------------------------------------------------ + +type clusterNode struct { + Client *Client + + latency uint32 // atomic + generation uint32 // atomic + failing uint32 // atomic +} + +func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode { + opt := clOpt.clientOptions() + opt.Addr = addr + node := clusterNode{ + Client: clOpt.NewClient(opt), + } + + node.latency = math.MaxUint32 + if clOpt.RouteByLatency { + go node.updateLatency() + } + + return &node +} + +func (n *clusterNode) String() string { + return n.Client.String() +} + +func (n *clusterNode) Close() error { + return n.Client.Close() +} + +func (n *clusterNode) updateLatency() { + const numProbe = 10 + var dur uint64 + + for i := 0; i < numProbe; i++ { + time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond) + + start := time.Now() + n.Client.Ping(context.TODO()) + dur += uint64(time.Since(start) / time.Microsecond) + } + + latency := float64(dur) / float64(numProbe) + atomic.StoreUint32(&n.latency, uint32(latency+0.5)) +} + +func (n *clusterNode) Latency() time.Duration { + latency := atomic.LoadUint32(&n.latency) + return time.Duration(latency) * time.Microsecond +} + +func (n *clusterNode) MarkAsFailing() { + atomic.StoreUint32(&n.failing, uint32(time.Now().Unix())) +} + +func (n *clusterNode) Failing() bool { + const timeout = 15 // 15 seconds + + failing := atomic.LoadUint32(&n.failing) + if failing == 0 { + return false + } + if time.Now().Unix()-int64(failing) < timeout { + return true + } + atomic.StoreUint32(&n.failing, 0) + return false +} + +func (n *clusterNode) Generation() uint32 { + return atomic.LoadUint32(&n.generation) +} + +func (n *clusterNode) SetGeneration(gen uint32) { + for { + v := atomic.LoadUint32(&n.generation) + if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) { + break + } + } +} + +//------------------------------------------------------------------------------ + +type clusterNodes struct { + opt *ClusterOptions + + mu sync.RWMutex + addrs []string + nodes map[string]*clusterNode + activeAddrs []string + closed bool + + _generation uint32 // atomic +} + +func newClusterNodes(opt *ClusterOptions) *clusterNodes { + return &clusterNodes{ + opt: opt, + + addrs: opt.Addrs, + nodes: make(map[string]*clusterNode), + } +} + +func (c *clusterNodes) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, node := range c.nodes { + if err := node.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + + c.nodes = nil + c.activeAddrs = nil + + return firstErr +} + +func (c *clusterNodes) Addrs() ([]string, error) { + var addrs []string + + c.mu.RLock() + closed := c.closed //nolint:ifshort + if !closed { + if len(c.activeAddrs) > 0 { + addrs = c.activeAddrs + } else { + addrs = c.addrs + } + } + c.mu.RUnlock() + + if closed { + return nil, pool.ErrClosed + } + if len(addrs) == 0 { + return nil, errClusterNoNodes + } + return addrs, nil +} + +func (c *clusterNodes) NextGeneration() uint32 { + return atomic.AddUint32(&c._generation, 1) +} + +// GC removes unused nodes. 
+func (c *clusterNodes) GC(generation uint32) { + //nolint:prealloc + var collected []*clusterNode + + c.mu.Lock() + + c.activeAddrs = c.activeAddrs[:0] + for addr, node := range c.nodes { + if node.Generation() >= generation { + c.activeAddrs = append(c.activeAddrs, addr) + if c.opt.RouteByLatency { + go node.updateLatency() + } + continue + } + + delete(c.nodes, addr) + collected = append(collected, node) + } + + c.mu.Unlock() + + for _, node := range collected { + _ = node.Client.Close() + } +} + +func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) { + node, err := c.get(addr) + if err != nil { + return nil, err + } + if node != nil { + return node, nil + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + node, ok := c.nodes[addr] + if ok { + return node, nil + } + + node = newClusterNode(c.opt, addr) + + c.addrs = appendIfNotExists(c.addrs, addr) + c.nodes[addr] = node + + return node, nil +} + +func (c *clusterNodes) get(addr string) (*clusterNode, error) { + var node *clusterNode + var err error + c.mu.RLock() + if c.closed { + err = pool.ErrClosed + } else { + node = c.nodes[addr] + } + c.mu.RUnlock() + return node, err +} + +func (c *clusterNodes) All() ([]*clusterNode, error) { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.closed { + return nil, pool.ErrClosed + } + + cp := make([]*clusterNode, 0, len(c.nodes)) + for _, node := range c.nodes { + cp = append(cp, node) + } + return cp, nil +} + +func (c *clusterNodes) Random() (*clusterNode, error) { + addrs, err := c.Addrs() + if err != nil { + return nil, err + } + + n := rand.Intn(len(addrs)) + return c.GetOrCreate(addrs[n]) +} + +//------------------------------------------------------------------------------ + +type clusterSlot struct { + start, end int + nodes []*clusterNode +} + +type clusterSlotSlice []*clusterSlot + +func (p clusterSlotSlice) Len() int { + return len(p) +} + +func (p clusterSlotSlice) Less(i, j int) bool { + return p[i].start < p[j].start +} + +func (p clusterSlotSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +type clusterState struct { + nodes *clusterNodes + Masters []*clusterNode + Slaves []*clusterNode + + slots []*clusterSlot + + generation uint32 + createdAt time.Time +} + +func newClusterState( + nodes *clusterNodes, slots []ClusterSlot, origin string, +) (*clusterState, error) { + c := clusterState{ + nodes: nodes, + + slots: make([]*clusterSlot, 0, len(slots)), + + generation: nodes.NextGeneration(), + createdAt: time.Now(), + } + + originHost, _, _ := net.SplitHostPort(origin) + isLoopbackOrigin := isLoopback(originHost) + + for _, slot := range slots { + var nodes []*clusterNode + for i, slotNode := range slot.Nodes { + addr := slotNode.Addr + if !isLoopbackOrigin { + addr = replaceLoopbackHost(addr, originHost) + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return nil, err + } + + node.SetGeneration(c.generation) + nodes = append(nodes, node) + + if i == 0 { + c.Masters = appendUniqueNode(c.Masters, node) + } else { + c.Slaves = appendUniqueNode(c.Slaves, node) + } + } + + c.slots = append(c.slots, &clusterSlot{ + start: slot.Start, + end: slot.End, + nodes: nodes, + }) + } + + sort.Sort(clusterSlotSlice(c.slots)) + + time.AfterFunc(time.Minute, func() { + nodes.GC(c.generation) + }) + + return &c, nil +} + +func replaceLoopbackHost(nodeAddr, originHost string) string { + nodeHost, nodePort, err := net.SplitHostPort(nodeAddr) + if err != nil { + return nodeAddr + } + + nodeIP := net.ParseIP(nodeHost) + if 
nodeIP == nil { + return nodeAddr + } + + if !nodeIP.IsLoopback() { + return nodeAddr + } + + // Use origin host which is not loopback and node port. + return net.JoinHostPort(originHost, nodePort) +} + +func isLoopback(host string) bool { + ip := net.ParseIP(host) + if ip == nil { + return true + } + return ip.IsLoopback() +} + +func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) > 0 { + return nodes[0], nil + } + return c.nodes.Random() +} + +func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + switch len(nodes) { + case 0: + return c.nodes.Random() + case 1: + return nodes[0], nil + case 2: + if slave := nodes[1]; !slave.Failing() { + return slave, nil + } + return nodes[0], nil + default: + var slave *clusterNode + for i := 0; i < 10; i++ { + n := rand.Intn(len(nodes)-1) + 1 + slave = nodes[n] + if !slave.Failing() { + return slave, nil + } + } + + // All slaves are loading - use master. + return nodes[0], nil + } +} + +func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + + var node *clusterNode + for _, n := range nodes { + if n.Failing() { + continue + } + if node == nil || n.Latency() < node.Latency() { + node = n + } + } + if node != nil { + return node, nil + } + + // If all nodes are failing - return random node + return c.nodes.Random() +} + +func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) { + nodes := c.slotNodes(slot) + if len(nodes) == 0 { + return c.nodes.Random() + } + if len(nodes) == 1 { + return nodes[0], nil + } + randomNodes := rand.Perm(len(nodes)) + for _, idx := range randomNodes { + if node := nodes[idx]; !node.Failing() { + return node, nil + } + } + return nodes[randomNodes[0]], nil +} + +func (c *clusterState) slotNodes(slot int) []*clusterNode { + i := sort.Search(len(c.slots), func(i int) bool { + return c.slots[i].end >= slot + }) + if i >= len(c.slots) { + return nil + } + x := c.slots[i] + if slot >= x.start && slot <= x.end { + return x.nodes + } + return nil +} + +//------------------------------------------------------------------------------ + +type clusterStateHolder struct { + load func(ctx context.Context) (*clusterState, error) + + state atomic.Value + reloading uint32 // atomic +} + +func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder { + return &clusterStateHolder{ + load: fn, + } +} + +func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) { + state, err := c.load(ctx) + if err != nil { + return nil, err + } + c.state.Store(state) + return state, nil +} + +func (c *clusterStateHolder) LazyReload() { + if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) { + return + } + go func() { + defer atomic.StoreUint32(&c.reloading, 0) + + _, err := c.Reload(context.Background()) + if err != nil { + return + } + time.Sleep(200 * time.Millisecond) + }() +} + +func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) { + v := c.state.Load() + if v == nil { + return c.Reload(ctx) + } + + state := v.(*clusterState) + if time.Since(state.createdAt) > 10*time.Second { + c.LazyReload() + } + return state, nil +} + +func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) { + state, err := c.Reload(ctx) + if err == nil { + return state, nil + } + return c.Get(ctx) +} + 
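The clusterStateHolder above is a single-flight, non-blocking cache refresh: LazyReload uses an atomic compare-and-swap so that any number of concurrent callers trigger at most one background reload, while Get keeps serving the cached (possibly stale) state. A stripped-down sketch of the same pattern, with hypothetical names and independent of Redis:

```go
package lazy

import (
	"sync/atomic"
	"time"
)

// lazyValue caches a value and refreshes it in the background,
// with at most one reload in flight at a time.
type lazyValue struct {
	load      func() (interface{}, error)
	value     atomic.Value
	reloading uint32 // atomic; 0 = idle, 1 = reload in flight
}

func (v *lazyValue) LazyReload() {
	// Only one caller wins the CAS; concurrent callers return immediately
	// and keep reading the cached value.
	if !atomic.CompareAndSwapUint32(&v.reloading, 0, 1) {
		return
	}
	go func() {
		defer atomic.StoreUint32(&v.reloading, 0)
		if val, err := v.load(); err == nil {
			v.value.Store(val)
		}
		// Hold the flag a little longer so a burst of callers coalesces
		// into one reload, as cluster.go does with its 200ms sleep.
		time.Sleep(200 * time.Millisecond)
	}()
}
```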
+//------------------------------------------------------------------------------ + +type clusterClient struct { + opt *ClusterOptions + nodes *clusterNodes + state *clusterStateHolder //nolint:structcheck + cmdsInfoCache *cmdsInfoCache //nolint:structcheck +} + +// ClusterClient is a Redis Cluster client representing a pool of zero +// or more underlying connections. It's safe for concurrent use by +// multiple goroutines. +type ClusterClient struct { + *clusterClient + cmdable + hooks + ctx context.Context +} + +// NewClusterClient returns a Redis Cluster client as described in +// http://redis.io/topics/cluster-spec. +func NewClusterClient(opt *ClusterOptions) *ClusterClient { + opt.init() + + c := &ClusterClient{ + clusterClient: &clusterClient{ + opt: opt, + nodes: newClusterNodes(opt), + }, + ctx: context.Background(), + } + c.state = newClusterStateHolder(c.loadState) + c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo) + c.cmdable = c.Process + + if opt.IdleCheckFrequency > 0 { + go c.reaper(opt.IdleCheckFrequency) + } + + return c +} + +func (c *ClusterClient) Context() context.Context { + return c.ctx +} + +func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +// Options returns read-only Options that were used to create the client. +func (c *ClusterClient) Options() *ClusterOptions { + return c.opt +} + +// ReloadState reloads cluster state. If available it calls ClusterSlots func +// to get cluster slots information. +func (c *ClusterClient) ReloadState(ctx context.Context) { + c.state.LazyReload() +} + +// Close closes the cluster client, releasing any open resources. +// +// It is rare to Close a ClusterClient, as the ClusterClient is meant +// to be long-lived and shared between many goroutines. +func (c *ClusterClient) Close() error { + return c.nodes.Close() +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.process) +} + +func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error { + cmdInfo := c.cmdInfo(cmd.Name()) + slot := c.cmdSlot(cmd) + + var node *clusterNode + var ask bool + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + if node == nil { + var err error + node, err = c.cmdNode(ctx, cmdInfo, slot) + if err != nil { + return err + } + } + + if ask { + pipe := node.Client.Pipeline() + _ = pipe.Process(ctx, NewCmd(ctx, "asking")) + _ = pipe.Process(ctx, cmd) + _, lastErr = pipe.Exec(ctx) + _ = pipe.Close() + ask = false + } else { + lastErr = node.Client.Process(ctx, cmd) + } + + // If there is no error - we are done. + if lastErr == nil { + return nil + } + if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node = nil + continue + } + + // If slave is loading - pick another node. 
+ if c.opt.ReadOnly && isLoadingError(lastErr) { + node.MarkAsFailing() + node = nil + continue + } + + var moved bool + var addr string + moved, ask, addr = isMovedError(lastErr) + if moved || ask { + c.state.LazyReload() + + var err error + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if shouldRetry(lastErr, cmd.readTimeout() == nil) { + // First retry the same node. + if attempt == 0 { + continue + } + + // Second try another node. + node.MarkAsFailing() + node = nil + continue + } + + return lastErr + } + return lastErr +} + +// ForEachMaster concurrently calls the fn on each master node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachMaster( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, master := range state.Masters { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(master) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachSlave concurrently calls the fn on each slave node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachSlave( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + for _, slave := range state.Slaves { + wg.Add(1) + go func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(slave) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// ForEachShard concurrently calls the fn on each known node in the cluster. +// It returns the first error if any. +func (c *ClusterClient) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + state, err := c.state.ReloadOrGet(ctx) + if err != nil { + return err + } + + var wg sync.WaitGroup + errCh := make(chan error, 1) + + worker := func(node *clusterNode) { + defer wg.Done() + err := fn(ctx, node.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + } + + for _, node := range state.Masters { + wg.Add(1) + go worker(node) + } + for _, node := range state.Slaves { + wg.Add(1) + go worker(node) + } + + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +// PoolStats returns accumulated connection pool stats. 
+func (c *ClusterClient) PoolStats() *PoolStats { + var acc PoolStats + + state, _ := c.state.Get(context.TODO()) + if state == nil { + return &acc + } + + for _, node := range state.Masters { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + for _, node := range state.Slaves { + s := node.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + acc.StaleConns += s.StaleConns + } + + return &acc +} + +func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) { + if c.opt.ClusterSlots != nil { + slots, err := c.opt.ClusterSlots(ctx) + if err != nil { + return nil, err + } + return newClusterState(c.nodes, slots, "") + } + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + for _, idx := range rand.Perm(len(addrs)) { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + slots, err := node.Client.ClusterSlots(ctx).Result() + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + return newClusterState(c.nodes, slots, node.Client.opt.Addr) + } + + /* + * No node is connectable. It's possible that all nodes' IP has changed. + * Clear activeAddrs to let client be able to re-connect using the initial + * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), + * which might have chance to resolve domain name and get updated IP address. + */ + c.nodes.mu.Lock() + c.nodes.activeAddrs = nil + c.nodes.mu.Unlock() + + return nil, firstErr +} + +// reaper closes idle connections to the cluster. 
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) { + ticker := time.NewTicker(idleCheckFrequency) + defer ticker.Stop() + + for range ticker.C { + nodes, err := c.nodes.All() + if err != nil { + break + } + + for _, node := range nodes { + _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns() + if err != nil { + internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err) + } + } + } +} + +func (c *ClusterClient) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c._processPipeline) +} + +func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error { + cmdsMap := newCmdsMap() + err := c.mapCmdsByNode(ctx, cmdsMap, cmds) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap.m { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error { + state, err := c.state.Get(ctx) + if err != nil { + return err + } + + if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) { + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil + } + + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + node, err := state.slotMasterNode(slot) + if err != nil { + return err + } + cmdsMap.Add(node, cmd) + } + return nil +} + +func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool { + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(cmd.Name()) + if cmdInfo == nil || !cmdInfo.ReadOnly { + return false + } + } + return true +} + +func (c *ClusterClient) _processPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds) + }) + }) + }) +} + +func (c *ClusterClient) pipelineReadCmds( + ctx context.Context, + node *clusterNode, + rd *proto.Reader, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + cmd.SetErr(err) + + if err == nil { + continue + } + + 
if c.checkMovedErr(ctx, cmd, err, failedCmds) { + continue + } + + if c.opt.ReadOnly && isLoadingError(err) { + node.MarkAsFailing() + return err + } + if isRedisError(err) { + continue + } + return err + } + return nil +} + +func (c *ClusterClient) checkMovedErr( + ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap, +) bool { + moved, ask, addr := isMovedError(err) + if !moved && !ask { + return false + } + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return false + } + + if moved { + c.state.LazyReload() + failedCmds.Add(node, cmd) + return true + } + + if ask { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + return true + } + + panic("not reached") +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *ClusterClient) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline) +} + +func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error { + // Trim multi .. exec. + cmds = cmds[1 : len(cmds)-1] + + state, err := c.state.Get(ctx) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + cmdsMap := c.mapCmdsBySlot(cmds) + for slot, cmds := range cmdsMap { + node, err := state.slotMasterNode(slot) + if err != nil { + setCmdsErr(cmds, err) + continue + } + + cmdsMap := map[*clusterNode][]Cmder{node: cmds} + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + setCmdsErr(cmds, err) + return err + } + } + + failedCmds := newCmdsMap() + var wg sync.WaitGroup + + for node, cmds := range cmdsMap { + wg.Add(1) + go func(node *clusterNode, cmds []Cmder) { + defer wg.Done() + + err := c._processTxPipelineNode(ctx, node, cmds, failedCmds) + if err == nil { + return + } + + if attempt < c.opt.MaxRedirects { + if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil { + setCmdsErr(cmds, err) + } + } else { + setCmdsErr(cmds, err) + } + }(node, cmds) + } + + wg.Wait() + if len(failedCmds.m) == 0 { + break + } + cmdsMap = failedCmds.m + } + } + + return cmdsFirstErr(cmds) +} + +func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder { + cmdsMap := make(map[int][]Cmder) + for _, cmd := range cmds { + slot := c.cmdSlot(cmd) + cmdsMap[slot] = append(cmdsMap[slot], cmd) + } + return cmdsMap +} + +func (c *ClusterClient) _processTxPipelineNode( + ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap, +) error { + return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return err + } + + return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. 
+ cmds = cmds[1 : len(cmds)-1] + + err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds) + if err != nil { + moved, ask, addr := isMovedError(err) + if moved || ask { + return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds) + } + return err + } + + return pipelineReadCmds(rd, cmds) + }) + }) + }) +} + +func (c *ClusterClient) txPipelineReadQueued( + ctx context.Context, + rd *proto.Reader, + statusCmd *StatusCmd, + cmds []Cmder, + failedCmds *cmdsMap, +) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for _, cmd := range cmds { + err := statusCmd.readReply(rd) + if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) { + continue + } + return err + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + return proto.ParseErrorReply(line) + case proto.ArrayReply: + // ok + default: + return fmt.Errorf("redis: expected '*', but got line %q", line) + } + + return nil +} + +func (c *ClusterClient) cmdsMoved( + ctx context.Context, cmds []Cmder, + moved, ask bool, + addr string, + failedCmds *cmdsMap, +) error { + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + + if moved { + c.state.LazyReload() + for _, cmd := range cmds { + failedCmds.Add(node, cmd) + } + return nil + } + + if ask { + for _, cmd := range cmds { + failedCmds.Add(node, NewCmd(ctx, "asking"), cmd) + } + return nil + } + + return nil +} + +func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + slot := hashtag.Slot(keys[0]) + for _, key := range keys[1:] { + if hashtag.Slot(key) != slot { + err := fmt.Errorf("redis: Watch requires all keys to be in the same slot") + return err + } + } + + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + + for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + err = node.Client.Watch(ctx, fn, keys...) 
+ if err == nil { + break + } + + moved, ask, addr := isMovedError(err) + if moved || ask { + node, err = c.nodes.GetOrCreate(addr) + if err != nil { + return err + } + continue + } + + if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed { + if isReadOnly { + c.state.LazyReload() + } + node, err = c.slotMasterNode(ctx, slot) + if err != nil { + return err + } + continue + } + + if shouldRetry(err, true) { + continue + } + + return err + } + + return err +} + +func (c *ClusterClient) pubSub() *PubSub { + var node *clusterNode + pubsub := &PubSub{ + opt: c.opt.clientOptions(), + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + if node != nil { + panic("node != nil") + } + + var err error + if len(channels) > 0 { + slot := hashtag.Slot(channels[0]) + node, err = c.slotMasterNode(ctx, slot) + } else { + node, err = c.nodes.Random() + } + if err != nil { + return nil, err + } + + cn, err := node.Client.newConn(context.TODO()) + if err != nil { + node = nil + + return nil, err + } + + return cn, nil + }, + closeConn: func(cn *pool.Conn) error { + err := node.Client.connPool.CloseConn(cn) + node = nil + return err + }, + } + pubsub.init() + + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +func (c *ClusterClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + // Try 3 random nodes. 
+ const nodeLimit = 3 + + addrs, err := c.nodes.Addrs() + if err != nil { + return nil, err + } + + var firstErr error + + perm := rand.Perm(len(addrs)) + if len(perm) > nodeLimit { + perm = perm[:nodeLimit] + } + + for _, idx := range perm { + addr := addrs[idx] + + node, err := c.nodes.GetOrCreate(addr) + if err != nil { + if firstErr == nil { + firstErr = err + } + continue + } + + info, err := node.Client.Command(ctx).Result() + if err == nil { + return info, nil + } + if firstErr == nil { + firstErr = err + } + } + + if firstErr == nil { + panic("not reached") + } + return nil, firstErr +} + +func (c *ClusterClient) cmdInfo(name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx) + if err != nil { + return nil + } + + info := cmdsInfo[name] + if info == nil { + internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) + } + return info +} + +func (c *ClusterClient) cmdSlot(cmd Cmder) int { + args := cmd.Args() + if args[0] == "cluster" && args[1] == "getkeysinslot" { + return args[2].(int) + } + + cmdInfo := c.cmdInfo(cmd.Name()) + return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo)) +} + +func cmdSlot(cmd Cmder, pos int) int { + if pos == 0 { + return hashtag.RandomSlot() + } + firstKey := cmd.stringArg(pos) + return hashtag.Slot(firstKey) +} + +func (c *ClusterClient) cmdNode( + ctx context.Context, + cmdInfo *CommandInfo, + slot int, +) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + + if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly { + return c.slotReadOnlyNode(state, slot) + } + return state.slotMasterNode(slot) +} + +func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) { + if c.opt.RouteByLatency { + return state.slotClosestNode(slot) + } + if c.opt.RouteRandomly { + return state.slotRandomNode(slot) + } + return state.slotSlaveNode(slot) +} + +func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + return state.slotMasterNode(slot) +} + +// SlaveForKey gets a client for a replica node to run any command on it. +// This is especially useful if we want to run a particular lua script which has +// only read only commands on the replica. +// This is because other redis commands generally have a flag that points that +// they are read only and automatically run on the replica nodes +// if ClusterOptions.ReadOnly flag is set to true. +func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) { + state, err := c.state.Get(ctx) + if err != nil { + return nil, err + } + slot := hashtag.Slot(key) + node, err := c.slotReadOnlyNode(state, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +// MasterForKey return a client to the master node for a particular key. 
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) { + slot := hashtag.Slot(key) + node, err := c.slotMasterNode(ctx, slot) + if err != nil { + return nil, err + } + return node.Client, err +} + +func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode { + for _, n := range nodes { + if n == node { + return nodes + } + } + return append(nodes, node) +} + +func appendIfNotExists(ss []string, es ...string) []string { +loop: + for _, e := range es { + for _, s := range ss { + if s == e { + continue loop + } + } + ss = append(ss, e) + } + return ss +} + +//------------------------------------------------------------------------------ + +type cmdsMap struct { + mu sync.Mutex + m map[*clusterNode][]Cmder +} + +func newCmdsMap() *cmdsMap { + return &cmdsMap{ + m: make(map[*clusterNode][]Cmder), + } +} + +func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) { + m.mu.Lock() + m.m[node] = append(m.m[node], cmds...) + m.mu.Unlock() +} diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go new file mode 100644 index 0000000000..085bce83d5 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go @@ -0,0 +1,109 @@ +package redis + +import ( + "context" + "sync" + "sync/atomic" +) + +func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "dbsize") + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + var size int64 + err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { + n, err := master.DBSize(ctx).Result() + if err != nil { + return err + } + atomic.AddInt64(&size, n) + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = size + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd { + cmd := NewStringCmd(ctx, "script", "load", script) + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + mu := &sync.Mutex{} + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptLoad(ctx, script).Result() + if err != nil { + return err + } + + mu.Lock() + if cmd.Val() == "" { + cmd.val = val + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "script", "flush") + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + return shard.ScriptFlush(ctx).Err() + }) + if err != nil { + cmd.SetErr(err) + } + return nil + }) + return cmd +} + +func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { + args := make([]interface{}, 2+len(hashes)) + args[0] = "script" + args[1] = "exists" + for i, hash := range hashes { + args[2+i] = hash + } + cmd := NewBoolSliceCmd(ctx, args...) 
+ + result := make([]bool, len(hashes)) + for i := range result { + result[i] = true + } + + _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error { + mu := &sync.Mutex{} + err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error { + val, err := shard.ScriptExists(ctx, hashes...).Result() + if err != nil { + return err + } + + mu.Lock() + for i, v := range val { + result[i] = result[i] && v + } + mu.Unlock() + + return nil + }) + if err != nil { + cmd.SetErr(err) + } else { + cmd.val = result + } + return nil + }) + return cmd +} diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go new file mode 100644 index 0000000000..4bb12a85be --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/command.go @@ -0,0 +1,3478 @@ +package redis + +import ( + "context" + "fmt" + "net" + "strconv" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hscan" + "github.com/go-redis/redis/v8/internal/proto" + "github.com/go-redis/redis/v8/internal/util" +) + +type Cmder interface { + Name() string + FullName() string + Args() []interface{} + String() string + stringArg(int) string + firstKeyPos() int8 + SetFirstKeyPos(int8) + + readTimeout() *time.Duration + readReply(rd *proto.Reader) error + + SetErr(error) + Err() error +} + +func setCmdsErr(cmds []Cmder, e error) { + for _, cmd := range cmds { + if cmd.Err() == nil { + cmd.SetErr(e) + } + } +} + +func cmdsFirstErr(cmds []Cmder) error { + for _, cmd := range cmds { + if err := cmd.Err(); err != nil { + return err + } + } + return nil +} + +func writeCmds(wr *proto.Writer, cmds []Cmder) error { + for _, cmd := range cmds { + if err := writeCmd(wr, cmd); err != nil { + return err + } + } + return nil +} + +func writeCmd(wr *proto.Writer, cmd Cmder) error { + return wr.WriteArgs(cmd.Args()) +} + +func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int { + if pos := cmd.firstKeyPos(); pos != 0 { + return int(pos) + } + + switch cmd.Name() { + case "eval", "evalsha": + if cmd.stringArg(2) != "0" { + return 3 + } + + return 0 + case "publish": + return 1 + case "memory": + // https://github.com/redis/redis/issues/7493 + if cmd.stringArg(1) == "usage" { + return 2 + } + } + + if info != nil { + return int(info.FirstKeyPos) + } + return 0 +} + +func cmdString(cmd Cmder, val interface{}) string { + b := make([]byte, 0, 64) + + for i, arg := range cmd.Args() { + if i > 0 { + b = append(b, ' ') + } + b = internal.AppendArg(b, arg) + } + + if err := cmd.Err(); err != nil { + b = append(b, ": "...) + b = append(b, err.Error()...) + } else if val != nil { + b = append(b, ": "...) + b = internal.AppendArg(b, val) + } + + return internal.String(b) +} + +//------------------------------------------------------------------------------ + +type baseCmd struct { + ctx context.Context + args []interface{} + err error + keyPos int8 + + _readTimeout *time.Duration +} + +var _ Cmder = (*Cmd)(nil) + +func (cmd *baseCmd) Name() string { + if len(cmd.args) == 0 { + return "" + } + // Cmd name must be lower cased. 
+ return internal.ToLower(cmd.stringArg(0)) +} + +func (cmd *baseCmd) FullName() string { + switch name := cmd.Name(); name { + case "cluster", "command": + if len(cmd.args) == 1 { + return name + } + if s2, ok := cmd.args[1].(string); ok { + return name + " " + s2 + } + return name + default: + return name + } +} + +func (cmd *baseCmd) Args() []interface{} { + return cmd.args +} + +func (cmd *baseCmd) stringArg(pos int) string { + if pos < 0 || pos >= len(cmd.args) { + return "" + } + arg := cmd.args[pos] + switch v := arg.(type) { + case string: + return v + default: + // TODO: consider using appendArg + return fmt.Sprint(v) + } +} + +func (cmd *baseCmd) firstKeyPos() int8 { + return cmd.keyPos +} + +func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) { + cmd.keyPos = keyPos +} + +func (cmd *baseCmd) SetErr(e error) { + cmd.err = e +} + +func (cmd *baseCmd) Err() error { + return cmd.err +} + +func (cmd *baseCmd) readTimeout() *time.Duration { + return cmd._readTimeout +} + +func (cmd *baseCmd) setReadTimeout(d time.Duration) { + cmd._readTimeout = &d +} + +//------------------------------------------------------------------------------ + +type Cmd struct { + baseCmd + + val interface{} +} + +func NewCmd(ctx context.Context, args ...interface{}) *Cmd { + return &Cmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *Cmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *Cmd) SetVal(val interface{}) { + cmd.val = val +} + +func (cmd *Cmd) Val() interface{} { + return cmd.val +} + +func (cmd *Cmd) Result() (interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *Cmd) Text() (string, error) { + if cmd.err != nil { + return "", cmd.err + } + return toString(cmd.val) +} + +func toString(val interface{}) (string, error) { + switch val := val.(type) { + case string: + return val, nil + default: + err := fmt.Errorf("redis: unexpected type=%T for String", val) + return "", err + } +} + +func (cmd *Cmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + switch val := cmd.val.(type) { + case int64: + return int(val), nil + case string: + return strconv.Atoi(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int", val) + return 0, err + } +} + +func (cmd *Cmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toInt64(cmd.val) +} + +func toInt64(val interface{}) (int64, error) { + switch val := val.(type) { + case int64: + return val, nil + case string: + return strconv.ParseInt(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Int64", val) + return 0, err + } +} + +func (cmd *Cmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toUint64(cmd.val) +} + +func toUint64(val interface{}) (uint64, error) { + switch val := val.(type) { + case int64: + return uint64(val), nil + case string: + return strconv.ParseUint(val, 10, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Uint64", val) + return 0, err + } +} + +func (cmd *Cmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + return toFloat32(cmd.val) +} + +func toFloat32(val interface{}) (float32, error) { + switch val := val.(type) { + case int64: + return float32(val), nil + case string: + f, err := strconv.ParseFloat(val, 32) + if err != nil { + return 0, err + } + return float32(f), nil + default: + err := fmt.Errorf("redis: unexpected type=%T for Float32", val) + return 0, err + } +} + +func (cmd *Cmd) Float64() (float64, error) { + if 
cmd.err != nil { + return 0, cmd.err + } + return toFloat64(cmd.val) +} + +func toFloat64(val interface{}) (float64, error) { + switch val := val.(type) { + case int64: + return float64(val), nil + case string: + return strconv.ParseFloat(val, 64) + default: + err := fmt.Errorf("redis: unexpected type=%T for Float64", val) + return 0, err + } +} + +func (cmd *Cmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return toBool(cmd.val) +} + +func toBool(val interface{}) (bool, error) { + switch val := val.(type) { + case int64: + return val != 0, nil + case string: + return strconv.ParseBool(val) + default: + err := fmt.Errorf("redis: unexpected type=%T for Bool", val) + return false, err + } +} + +func (cmd *Cmd) Slice() ([]interface{}, error) { + if cmd.err != nil { + return nil, cmd.err + } + switch val := cmd.val.(type) { + case []interface{}: + return val, nil + default: + return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val) + } +} + +func (cmd *Cmd) StringSlice() ([]string, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + ss := make([]string, len(slice)) + for i, iface := range slice { + val, err := toString(iface) + if err != nil { + return nil, err + } + ss[i] = val + } + return ss, nil +} + +func (cmd *Cmd) Int64Slice() ([]int64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]int64, len(slice)) + for i, iface := range slice { + val, err := toInt64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Uint64Slice() ([]uint64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + nums := make([]uint64, len(slice)) + for i, iface := range slice { + val, err := toUint64(iface) + if err != nil { + return nil, err + } + nums[i] = val + } + return nums, nil +} + +func (cmd *Cmd) Float32Slice() ([]float32, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float32, len(slice)) + for i, iface := range slice { + val, err := toFloat32(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) Float64Slice() ([]float64, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + floats := make([]float64, len(slice)) + for i, iface := range slice { + val, err := toFloat64(iface) + if err != nil { + return nil, err + } + floats[i] = val + } + return floats, nil +} + +func (cmd *Cmd) BoolSlice() ([]bool, error) { + slice, err := cmd.Slice() + if err != nil { + return nil, err + } + + bools := make([]bool, len(slice)) + for i, iface := range slice { + val, err := toBool(iface) + if err != nil { + return nil, err + } + bools[i] = val + } + return bools, nil +} + +func (cmd *Cmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadReply(sliceParser) + return err +} + +// sliceParser implements proto.MultiBulkParse. 
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) { + vals := make([]interface{}, n) + for i := 0; i < len(vals); i++ { + v, err := rd.ReadReply(sliceParser) + if err != nil { + if err == Nil { + vals[i] = nil + continue + } + if err, ok := err.(proto.RedisError); ok { + vals[i] = err + continue + } + return nil, err + } + vals[i] = v + } + return vals, nil +} + +//------------------------------------------------------------------------------ + +type SliceCmd struct { + baseCmd + + val []interface{} +} + +var _ Cmder = (*SliceCmd)(nil) + +func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd { + return &SliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SliceCmd) SetVal(val []interface{}) { + cmd.val = val +} + +func (cmd *SliceCmd) Val() []interface{} { + return cmd.val +} + +func (cmd *SliceCmd) Result() ([]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *SliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *SliceCmd) Scan(dst interface{}) error { + if cmd.err != nil { + return cmd.err + } + + // Pass the list of keys and values. + // Skip the first two args for: HMGET key + var args []interface{} + if cmd.args[0] == "hmget" { + args = cmd.args[2:] + } else { + // Otherwise, it's: MGET field field ... + args = cmd.args[1:] + } + + return hscan.Scan(dst, args, cmd.val) +} + +func (cmd *SliceCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadArrayReply(sliceParser) + if err != nil { + return err + } + cmd.val = v.([]interface{}) + return nil +} + +//------------------------------------------------------------------------------ + +type StatusCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StatusCmd)(nil) + +func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd { + return &StatusCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StatusCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StatusCmd) Val() string { + return cmd.val +} + +func (cmd *StatusCmd) Result() (string, error) { + return cmd.val, cmd.err +} + +func (cmd *StatusCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type IntCmd struct { + baseCmd + + val int64 +} + +var _ Cmder = (*IntCmd)(nil) + +func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd { + return &IntCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *IntCmd) SetVal(val int64) { + cmd.val = val +} + +func (cmd *IntCmd) Val() int64 { + return cmd.val +} + +func (cmd *IntCmd) Result() (int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntCmd) Uint64() (uint64, error) { + return uint64(cmd.val), cmd.err +} + +func (cmd *IntCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadIntReply() + return err +} + +//------------------------------------------------------------------------------ + +type IntSliceCmd struct { + baseCmd + + val []int64 +} + +var _ Cmder = (*IntSliceCmd)(nil) + +func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd { + return &IntSliceCmd{ + baseCmd: baseCmd{ + ctx: 
ctx, + args: args, + }, + } +} + +func (cmd *IntSliceCmd) SetVal(val []int64) { + cmd.val = val +} + +func (cmd *IntSliceCmd) Val() []int64 { + return cmd.val +} + +func (cmd *IntSliceCmd) Result() ([]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *IntSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]int64, n) + for i := 0; i < len(cmd.val); i++ { + num, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = num + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type DurationCmd struct { + baseCmd + + val time.Duration + precision time.Duration +} + +var _ Cmder = (*DurationCmd)(nil) + +func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd { + return &DurationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + precision: precision, + } +} + +func (cmd *DurationCmd) SetVal(val time.Duration) { + cmd.val = val +} + +func (cmd *DurationCmd) Val() time.Duration { + return cmd.val +} + +func (cmd *DurationCmd) Result() (time.Duration, error) { + return cmd.val, cmd.err +} + +func (cmd *DurationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *DurationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadIntReply() + if err != nil { + return err + } + switch n { + // -2 if the key does not exist + // -1 if the key exists but has no associated expire + case -2, -1: + cmd.val = time.Duration(n) + default: + cmd.val = time.Duration(n) * cmd.precision + } + return nil +} + +//------------------------------------------------------------------------------ + +type TimeCmd struct { + baseCmd + + val time.Time +} + +var _ Cmder = (*TimeCmd)(nil) + +func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd { + return &TimeCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *TimeCmd) SetVal(val time.Time) { + cmd.val = val +} + +func (cmd *TimeCmd) Val() time.Time { + return cmd.val +} + +func (cmd *TimeCmd) Result() (time.Time, error) { + return cmd.val, cmd.err +} + +func (cmd *TimeCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *TimeCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d elements, expected 2", n) + } + + sec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + microsec, err := rd.ReadInt() + if err != nil { + return nil, err + } + + cmd.val = time.Unix(sec, microsec*1000) + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type BoolCmd struct { + baseCmd + + val bool +} + +var _ Cmder = (*BoolCmd)(nil) + +func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd { + return &BoolCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolCmd) SetVal(val bool) { + cmd.val = val +} + +func (cmd *BoolCmd) Val() bool { + return cmd.val +} + +func (cmd *BoolCmd) Result() (bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadReply(nil) + // `SET key value NX` returns nil when key already exists. 
But + // `SETNX key value` returns bool (0/1). So convert nil to bool. + if err == Nil { + cmd.val = false + return nil + } + if err != nil { + return err + } + switch v := v.(type) { + case int64: + cmd.val = v == 1 + return nil + case string: + cmd.val = v == "OK" + return nil + default: + return fmt.Errorf("got %T, wanted int64 or string", v) + } +} + +//------------------------------------------------------------------------------ + +type StringCmd struct { + baseCmd + + val string +} + +var _ Cmder = (*StringCmd)(nil) + +func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd { + return &StringCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringCmd) SetVal(val string) { + cmd.val = val +} + +func (cmd *StringCmd) Val() string { + return cmd.val +} + +func (cmd *StringCmd) Result() (string, error) { + return cmd.Val(), cmd.err +} + +func (cmd *StringCmd) Bytes() ([]byte, error) { + return util.StringToBytes(cmd.val), cmd.err +} + +func (cmd *StringCmd) Bool() (bool, error) { + if cmd.err != nil { + return false, cmd.err + } + return strconv.ParseBool(cmd.val) +} + +func (cmd *StringCmd) Int() (int, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.Atoi(cmd.Val()) +} + +func (cmd *StringCmd) Int64() (int64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseInt(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Uint64() (uint64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseUint(cmd.Val(), 10, 64) +} + +func (cmd *StringCmd) Float32() (float32, error) { + if cmd.err != nil { + return 0, cmd.err + } + f, err := strconv.ParseFloat(cmd.Val(), 32) + if err != nil { + return 0, err + } + return float32(f), nil +} + +func (cmd *StringCmd) Float64() (float64, error) { + if cmd.err != nil { + return 0, cmd.err + } + return strconv.ParseFloat(cmd.Val(), 64) +} + +func (cmd *StringCmd) Time() (time.Time, error) { + if cmd.err != nil { + return time.Time{}, cmd.err + } + return time.Parse(time.RFC3339Nano, cmd.Val()) +} + +func (cmd *StringCmd) Scan(val interface{}) error { + if cmd.err != nil { + return cmd.err + } + return proto.Scan([]byte(cmd.val), val) +} + +func (cmd *StringCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadString() + return err +} + +//------------------------------------------------------------------------------ + +type FloatCmd struct { + baseCmd + + val float64 +} + +var _ Cmder = (*FloatCmd)(nil) + +func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd { + return &FloatCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatCmd) SetVal(val float64) { + cmd.val = val +} + +func (cmd *FloatCmd) Val() float64 { + return cmd.val +} + +func (cmd *FloatCmd) Result() (float64, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *FloatCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) { + cmd.val, err = rd.ReadFloatReply() + return err +} + +//------------------------------------------------------------------------------ + +type FloatSliceCmd struct { + baseCmd + + val []float64 +} + +var _ Cmder = (*FloatSliceCmd)(nil) + +func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd { + return &FloatSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FloatSliceCmd) SetVal(val []float64) { + cmd.val = 
val +} + +func (cmd *FloatSliceCmd) Val() []float64 { + return cmd.val +} + +func (cmd *FloatSliceCmd) Result() ([]float64, error) { + return cmd.val, cmd.err +} + +func (cmd *FloatSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]float64, n) + for i := 0; i < len(cmd.val); i++ { + switch num, err := rd.ReadFloatReply(); { + case err == Nil: + cmd.val[i] = 0 + case err != nil: + return nil, err + default: + cmd.val[i] = num + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringSliceCmd struct { + baseCmd + + val []string +} + +var _ Cmder = (*StringSliceCmd)(nil) + +func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd { + return &StringSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringSliceCmd) SetVal(val []string) { + cmd.val = val +} + +func (cmd *StringSliceCmd) Val() []string { + return cmd.val +} + +func (cmd *StringSliceCmd) Result() ([]string, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *StringSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringSliceCmd) ScanSlice(container interface{}) error { + return proto.ScanSlice(cmd.Val(), container) +} + +func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]string, n) + for i := 0; i < len(cmd.val); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.val[i] = "" + case err != nil: + return nil, err + default: + cmd.val[i] = s + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type BoolSliceCmd struct { + baseCmd + + val []bool +} + +var _ Cmder = (*BoolSliceCmd)(nil) + +func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd { + return &BoolSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *BoolSliceCmd) SetVal(val []bool) { + cmd.val = val +} + +func (cmd *BoolSliceCmd) Val() []bool { + return cmd.val +} + +func (cmd *BoolSliceCmd) Result() ([]bool, error) { + return cmd.val, cmd.err +} + +func (cmd *BoolSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]bool, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.val[i] = n == 1 + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringStringMapCmd struct { + baseCmd + + val map[string]string +} + +var _ Cmder = (*StringStringMapCmd)(nil) + +func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd { + return &StringStringMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStringMapCmd) SetVal(val map[string]string) { + cmd.val = val +} + +func (cmd *StringStringMapCmd) Val() map[string]string { + return cmd.val +} + +func (cmd *StringStringMapCmd) Result() (map[string]string, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStringMapCmd) String() string { + 
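// Editorial aside, not part of the upstream diff: the typed accessors on
// StringCmd above (Int, Float64, Time, Scan, ...) and the struct Scan on
// StringStringMapCmd just below are how replies are usually consumed. A
// hedged sketch, assuming a reachable server and an assumed *redis.Client
// named rdb; the key names and the struct are illustrative:
//
//	n, err := rdb.Get(ctx, "counter").Int() // string reply parsed as int
//	var u struct {
//		Name string `redis:"name"`
//		Age  int    `redis:"age"`
//	}
//	err = rdb.HGetAll(ctx, "user:1").Scan(&u) // map reply scanned via tags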
return cmdString(cmd, cmd.val) +} + +// Scan scans the results from the map into a destination struct. The map keys +// are matched in the Redis struct fields by the `redis:"field"` tag. +func (cmd *StringStringMapCmd) Scan(dest interface{}) error { + if cmd.err != nil { + return cmd.err + } + + strct, err := hscan.Struct(dest) + if err != nil { + return err + } + + for k, v := range cmd.val { + if err := strct.Scan(k, v); err != nil { + return err + } + } + + return nil +} + +func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]string, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val[key] = value + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringIntMapCmd struct { + baseCmd + + val map[string]int64 +} + +var _ Cmder = (*StringIntMapCmd)(nil) + +func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd { + return &StringIntMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringIntMapCmd) SetVal(val map[string]int64) { + cmd.val = val +} + +func (cmd *StringIntMapCmd) Val() map[string]int64 { + return cmd.val +} + +func (cmd *StringIntMapCmd) Result() (map[string]int64, error) { + return cmd.val, cmd.err +} + +func (cmd *StringIntMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]int64, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + n, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + cmd.val[key] = n + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type StringStructMapCmd struct { + baseCmd + + val map[string]struct{} +} + +var _ Cmder = (*StringStructMapCmd)(nil) + +func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd { + return &StringStructMapCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) { + cmd.val = val +} + +func (cmd *StringStructMapCmd) Val() map[string]struct{} { + return cmd.val +} + +func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) { + return cmd.val, cmd.err +} + +func (cmd *StringStructMapCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]struct{}, n) + for i := int64(0); i < n; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + cmd.val[key] = struct{}{} + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XMessage struct { + ID string + Values map[string]interface{} +} + +type XMessageSliceCmd struct { + baseCmd + + val []XMessage +} + +var _ Cmder = (*XMessageSliceCmd)(nil) + +func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd { + return 
&XMessageSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XMessageSliceCmd) SetVal(val []XMessage) { + cmd.val = val +} + +func (cmd *XMessageSliceCmd) Val() []XMessage { + return cmd.val +} + +func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) { + return cmd.val, cmd.err +} + +func (cmd *XMessageSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error { + var err error + cmd.val, err = readXMessageSlice(rd) + return err +} + +func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + msgs := make([]XMessage, n) + for i := 0; i < n; i++ { + var err error + msgs[i], err = readXMessage(rd) + if err != nil { + return nil, err + } + } + return msgs, nil +} + +func readXMessage(rd *proto.Reader) (XMessage, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return XMessage{}, err + } + if n != 2 { + return XMessage{}, fmt.Errorf("got %d, wanted 2", n) + } + + id, err := rd.ReadString() + if err != nil { + return XMessage{}, err + } + + var values map[string]interface{} + + v, err := rd.ReadArrayReply(stringInterfaceMapParser) + if err != nil { + if err != proto.Nil { + return XMessage{}, err + } + } else { + values = v.(map[string]interface{}) + } + + return XMessage{ + ID: id, + Values: values, + }, nil +} + +// stringInterfaceMapParser implements proto.MultiBulkParse. +func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) { + m := make(map[string]interface{}, n/2) + for i := int64(0); i < n; i += 2 { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + value, err := rd.ReadString() + if err != nil { + return nil, err + } + + m[key] = value + } + return m, nil +} + +//------------------------------------------------------------------------------ + +type XStream struct { + Stream string + Messages []XMessage +} + +type XStreamSliceCmd struct { + baseCmd + + val []XStream +} + +var _ Cmder = (*XStreamSliceCmd)(nil) + +func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd { + return &XStreamSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XStreamSliceCmd) SetVal(val []XStream) { + cmd.val = val +} + +func (cmd *XStreamSliceCmd) Val() []XStream { + return cmd.val +} + +func (cmd *XStreamSliceCmd) Result() ([]XStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XStreamSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XStream, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + stream, err := rd.ReadString() + if err != nil { + return nil, err + } + + msgs, err := readXMessageSlice(rd) + if err != nil { + return nil, err + } + + cmd.val[i] = XStream{ + Stream: stream, + Messages: msgs, + } + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XPending struct { + Count int64 + Lower string + Higher string + Consumers map[string]int64 +} + +type XPendingCmd struct { + baseCmd + val *XPending +} + +var _ Cmder = 
(*XPendingCmd)(nil) + +func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd { + return &XPendingCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingCmd) SetVal(val *XPending) { + cmd.val = val +} + +func (cmd *XPendingCmd) Val() *XPending { + return cmd.val +} + +func (cmd *XPendingCmd) Result() (*XPending, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + count, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + lower, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + higher, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = &XPending{ + Count: count, + Lower: lower, + Higher: higher, + } + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + for i := int64(0); i < n; i++ { + _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + + consumerName, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumerPending, err := rd.ReadInt() + if err != nil { + return nil, err + } + + if cmd.val.Consumers == nil { + cmd.val.Consumers = make(map[string]int64) + } + cmd.val.Consumers[consumerName] = consumerPending + + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + if err != nil && err != Nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XPendingExt struct { + ID string + Consumer string + Idle time.Duration + RetryCount int64 +} + +type XPendingExtCmd struct { + baseCmd + val []XPendingExt +} + +var _ Cmder = (*XPendingExtCmd)(nil) + +func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd { + return &XPendingExtCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) { + cmd.val = val +} + +func (cmd *XPendingExtCmd) Val() []XPendingExt { + return cmd.val +} + +func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) { + return cmd.val, cmd.err +} + +func (cmd *XPendingExtCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]XPendingExt, 0, n) + for i := int64(0); i < n; i++ { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 4 { + return nil, fmt.Errorf("got %d, wanted 4", n) + } + + id, err := rd.ReadString() + if err != nil { + return nil, err + } + + consumer, err := rd.ReadString() + if err != nil && err != Nil { + return nil, err + } + + idle, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + retryCount, err := rd.ReadIntReply() + if err != nil && err != Nil { + return nil, err + } + + cmd.val = append(cmd.val, XPendingExt{ + ID: id, + Consumer: consumer, + Idle: time.Duration(idle) * time.Millisecond, + RetryCount: retryCount, + }) + return nil, nil + }) + if err != nil { + return nil, err + } + } + return nil, nil + }) + return err +} + 
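Editorial aside, not part of the upstream diff: the two reply shapes above map to
the summary form (XPENDING key group, decoded by XPendingCmd) and the extended
form (XPENDING key group start end count, decoded by XPendingExtCmd). A minimal,
hedged sketch of consuming both through this client; the stream and group names
are illustrative, and a reachable server plus imports of context, fmt and
github.com/go-redis/redis/v8 are assumed:

	func inspectPending(ctx context.Context, rdb *redis.Client) error {
		// Summary form: total count, lowest/highest pending IDs, per-consumer counts.
		summary, err := rdb.XPending(ctx, "mystream", "mygroup").Result()
		if err != nil {
			return err
		}
		fmt.Println(summary.Count, summary.Lower, summary.Higher, summary.Consumers)

		// Extended form: per-message consumer, idle time and delivery (retry) count.
		ext, err := rdb.XPendingExt(ctx, &redis.XPendingExtArgs{
			Stream: "mystream",
			Group:  "mygroup",
			Start:  "-",
			End:    "+",
			Count:  10,
		}).Result()
		if err != nil {
			return err
		}
		for _, p := range ext {
			fmt.Println(p.ID, p.Consumer, p.Idle, p.RetryCount)
		}
		return nil
	}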
+//------------------------------------------------------------------------------ + +type XAutoClaimCmd struct { + baseCmd + + start string + val []XMessage +} + +var _ Cmder = (*XAutoClaimCmd)(nil) + +func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd { + return &XAutoClaimCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + var err error + + cmd.start, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val, err = readXMessageSlice(rd) + if err != nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XAutoClaimJustIDCmd struct { + baseCmd + + start string + val []string +} + +var _ Cmder = (*XAutoClaimJustIDCmd)(nil) + +func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd { + return &XAutoClaimJustIDCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) { + cmd.val = val + cmd.start = start +} + +func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) { + return cmd.val, cmd.start +} + +func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) { + return cmd.val, cmd.start, cmd.err +} + +func (cmd *XAutoClaimJustIDCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 2 { + return nil, fmt.Errorf("got %d, wanted 2", n) + } + var err error + + cmd.start, err = rd.ReadString() + if err != nil { + return nil, err + } + + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + cmd.val = make([]string, nn) + for i := 0; i < nn; i++ { + cmd.val[i], err = rd.ReadString() + if err != nil { + return nil, err + } + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type XInfoConsumersCmd struct { + baseCmd + val []XInfoConsumer +} + +type XInfoConsumer struct { + Name string + Pending int64 + Idle int64 +} + +var _ Cmder = (*XInfoConsumersCmd)(nil) + +func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd { + return &XInfoConsumersCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "consumers", stream, group}, + }, + } +} + +func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) { + cmd.val = val +} + +func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer { + return cmd.val +} + +func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoConsumersCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error { + n, err := 
rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]XInfoConsumer, n) + + for i := 0; i < n; i++ { + cmd.val[i], err = readXConsumerInfo(rd) + if err != nil { + return err + } + } + + return nil +} + +func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) { + var consumer XInfoConsumer + + n, err := rd.ReadArrayLen() + if err != nil { + return consumer, err + } + if n != 6 { + return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n) + } + + for i := 0; i < 3; i++ { + key, err := rd.ReadString() + if err != nil { + return consumer, err + } + + val, err := rd.ReadString() + if err != nil { + return consumer, err + } + + switch key { + case "name": + consumer.Name = val + case "pending": + consumer.Pending, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return consumer, err + } + case "idle": + consumer.Idle, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return consumer, err + } + default: + return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key) + } + } + + return consumer, nil +} + +//------------------------------------------------------------------------------ + +type XInfoGroupsCmd struct { + baseCmd + val []XInfoGroup +} + +type XInfoGroup struct { + Name string + Consumers int64 + Pending int64 + LastDeliveredID string +} + +var _ Cmder = (*XInfoGroupsCmd)(nil) + +func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd { + return &XInfoGroupsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "groups", stream}, + }, + } +} + +func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) { + cmd.val = val +} + +func (cmd *XInfoGroupsCmd) Val() []XInfoGroup { + return cmd.val +} + +func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoGroupsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]XInfoGroup, n) + + for i := 0; i < n; i++ { + cmd.val[i], err = readXGroupInfo(rd) + if err != nil { + return err + } + } + + return nil +} + +func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) { + var group XInfoGroup + + n, err := rd.ReadArrayLen() + if err != nil { + return group, err + } + if n != 8 { + return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n) + } + + for i := 0; i < 4; i++ { + key, err := rd.ReadString() + if err != nil { + return group, err + } + + val, err := rd.ReadString() + if err != nil { + return group, err + } + + switch key { + case "name": + group.Name = val + case "consumers": + group.Consumers, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return group, err + } + case "pending": + group.Pending, err = strconv.ParseInt(val, 0, 64) + if err != nil { + return group, err + } + case "last-delivered-id": + group.LastDeliveredID = val + default: + return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key) + } + } + + return group, nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamCmd struct { + baseCmd + val *XInfoStream +} + +type XInfoStream struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + Groups int64 + LastGeneratedID string + FirstEntry XMessage + LastEntry XMessage +} + +var _ Cmder = (*XInfoStreamCmd)(nil) + +func NewXInfoStreamCmd(ctx context.Context, stream string) 
*XInfoStreamCmd { + return &XInfoStreamCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: []interface{}{"xinfo", "stream", stream}, + }, + } +} + +func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) { + cmd.val = val +} + +func (cmd *XInfoStreamCmd) Val() *XInfoStream { + return cmd.val +} + +func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadReply(xStreamInfoParser) + if err != nil { + return err + } + cmd.val = v.(*XInfoStream) + return nil +} + +func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + if n != 14 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ + "wanted 14", n) + } + var info XInfoStream + for i := 0; i < 7; i++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + switch key { + case "length": + info.Length, err = rd.ReadIntReply() + case "radix-tree-keys": + info.RadixTreeKeys, err = rd.ReadIntReply() + case "radix-tree-nodes": + info.RadixTreeNodes, err = rd.ReadIntReply() + case "groups": + info.Groups, err = rd.ReadIntReply() + case "last-generated-id": + info.LastGeneratedID, err = rd.ReadString() + case "first-entry": + info.FirstEntry, err = readXMessage(rd) + if err == Nil { + err = nil + } + case "last-entry": + info.LastEntry, err = readXMessage(rd) + if err == Nil { + err = nil + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + if err != nil { + return nil, err + } + } + return &info, nil +} + +//------------------------------------------------------------------------------ + +type XInfoStreamFullCmd struct { + baseCmd + val *XInfoStreamFull +} + +type XInfoStreamFull struct { + Length int64 + RadixTreeKeys int64 + RadixTreeNodes int64 + LastGeneratedID string + Entries []XMessage + Groups []XInfoStreamGroup +} + +type XInfoStreamGroup struct { + Name string + LastDeliveredID string + PelCount int64 + Pending []XInfoStreamGroupPending + Consumers []XInfoStreamConsumer +} + +type XInfoStreamGroupPending struct { + ID string + Consumer string + DeliveryTime time.Time + DeliveryCount int64 +} + +type XInfoStreamConsumer struct { + Name string + SeenTime time.Time + PelCount int64 + Pending []XInfoStreamConsumerPending +} + +type XInfoStreamConsumerPending struct { + ID string + DeliveryTime time.Time + DeliveryCount int64 +} + +var _ Cmder = (*XInfoStreamFullCmd)(nil) + +func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd { + return &XInfoStreamFullCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) { + cmd.val = val +} + +func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull { + return cmd.val +} + +func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) { + return cmd.val, cmd.err +} + +func (cmd *XInfoStreamFullCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + if n != 12 { + return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 12", n) + } + + cmd.val = &XInfoStreamFull{} + + for i := 0; i < 6; i++ { + key, err := rd.ReadString() + if err != nil { + return err + } + + switch key { + case "length": + cmd.val.Length, err = rd.ReadIntReply() + case 
"radix-tree-keys": + cmd.val.RadixTreeKeys, err = rd.ReadIntReply() + case "radix-tree-nodes": + cmd.val.RadixTreeNodes, err = rd.ReadIntReply() + case "last-generated-id": + cmd.val.LastGeneratedID, err = rd.ReadString() + case "entries": + cmd.val.Entries, err = readXMessageSlice(rd) + case "groups": + cmd.val.Groups, err = readStreamGroups(rd) + default: + return fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + if err != nil { + return err + } + } + return nil +} + +func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + groups := make([]XInfoStreamGroup, 0, n) + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 10 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 10", nn) + } + + group := XInfoStreamGroup{} + + for f := 0; f < 5; f++ { + key, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch key { + case "name": + group.Name, err = rd.ReadString() + case "last-delivered-id": + group.LastDeliveredID, err = rd.ReadString() + case "pel-count": + group.PelCount, err = rd.ReadIntReply() + case "pending": + group.Pending, err = readXInfoStreamGroupPending(rd) + case "consumers": + group.Consumers, err = readXInfoStreamConsumers(rd) + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", key) + } + + if err != nil { + return nil, err + } + } + + groups = append(groups, group) + } + + return groups, nil +} + +func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + pending := make([]XInfoStreamGroupPending, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 4 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 4", nn) + } + + p := XInfoStreamGroupPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + p.Consumer, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + + pending = append(pending, p) + } + + return pending, nil +} + +func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + consumers := make([]XInfoStreamConsumer, 0, n) + + for i := 0; i < n; i++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 8 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+ + "wanted 8", nn) + } + + c := XInfoStreamConsumer{} + + for f := 0; f < 4; f++ { + cKey, err := rd.ReadString() + if err != nil { + return nil, err + } + + switch cKey { + case "name": + c.Name, err = rd.ReadString() + case "seen-time": + seen, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond)) + case "pel-count": + c.PelCount, err = rd.ReadIntReply() + case "pending": + pendingNumber, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + + c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber) + + for pn := 
0; pn < pendingNumber; pn++ { + nn, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if nn != 3 { + return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+ + "wanted 3", nn) + } + + p := XInfoStreamConsumerPending{} + + p.ID, err = rd.ReadString() + if err != nil { + return nil, err + } + + delivery, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond)) + + p.DeliveryCount, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + + c.Pending = append(c.Pending, p) + } + default: + return nil, fmt.Errorf("redis: unexpected content %s "+ + "in XINFO STREAM reply", cKey) + } + if err != nil { + return nil, err + } + } + consumers = append(consumers, c) + } + + return consumers, nil +} + +//------------------------------------------------------------------------------ + +type ZSliceCmd struct { + baseCmd + + val []Z +} + +var _ Cmder = (*ZSliceCmd)(nil) + +func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd { + return &ZSliceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZSliceCmd) SetVal(val []Z) { + cmd.val = val +} + +func (cmd *ZSliceCmd) Val() []Z { + return cmd.val +} + +func (cmd *ZSliceCmd) Result() ([]Z, error) { + return cmd.val, cmd.err +} + +func (cmd *ZSliceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]Z, n/2) + for i := 0; i < len(cmd.val); i++ { + member, err := rd.ReadString() + if err != nil { + return nil, err + } + + score, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = Z{ + Member: member, + Score: score, + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type ZWithKeyCmd struct { + baseCmd + + val *ZWithKey +} + +var _ Cmder = (*ZWithKeyCmd)(nil) + +func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd { + return &ZWithKeyCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) { + cmd.val = val +} + +func (cmd *ZWithKeyCmd) Val() *ZWithKey { + return cmd.val +} + +func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ZWithKeyCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + if n != 3 { + return nil, fmt.Errorf("got %d elements, expected 3", n) + } + + cmd.val = &ZWithKey{} + var err error + + cmd.val.Key, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Member, err = rd.ReadString() + if err != nil { + return nil, err + } + + cmd.val.Score, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type ScanCmd struct { + baseCmd + + page []string + cursor uint64 + + process cmdable +} + +var _ Cmder = (*ScanCmd)(nil) + +func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd { + return &ScanCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + process: process, + } +} + +func (cmd *ScanCmd) SetVal(page []string, cursor uint64) { + 
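	// Editorial note, not part of the upstream diff: a ScanCmd carries one SCAN
	// page plus the cursor for the next call. A hedged sketch of the manual loop
	// (cmd.Iterator() below wraps the same pattern); rdb, ctx and the match
	// pattern are illustrative assumptions:
	//
	//	var cursor uint64
	//	for {
	//		keys, next, err := rdb.Scan(ctx, cursor, "user:*", 100).Result()
	//		if err != nil {
	//			return err
	//		}
	//		fmt.Println(keys) // process this page
	//		if next == 0 {
	//			break // a zero cursor means the scan is complete
	//		}
	//		cursor = next
	//	}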
cmd.page = page + cmd.cursor = cursor +} + +func (cmd *ScanCmd) Val() (keys []string, cursor uint64) { + return cmd.page, cmd.cursor +} + +func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) { + return cmd.page, cmd.cursor, cmd.err +} + +func (cmd *ScanCmd) String() string { + return cmdString(cmd, cmd.page) +} + +func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) { + cmd.page, cmd.cursor, err = rd.ReadScanReply() + return err +} + +// Iterator creates a new ScanIterator. +func (cmd *ScanCmd) Iterator() *ScanIterator { + return &ScanIterator{ + cmd: cmd, + } +} + +//------------------------------------------------------------------------------ + +type ClusterNode struct { + ID string + Addr string +} + +type ClusterSlot struct { + Start int + End int + Nodes []ClusterNode +} + +type ClusterSlotsCmd struct { + baseCmd + + val []ClusterSlot +} + +var _ Cmder = (*ClusterSlotsCmd)(nil) + +func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd { + return &ClusterSlotsCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) { + cmd.val = val +} + +func (cmd *ClusterSlotsCmd) Val() []ClusterSlot { + return cmd.val +} + +func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *ClusterSlotsCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]ClusterSlot, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 2 { + err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n) + return nil, err + } + + start, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + end, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + nodes := make([]ClusterNode, n-2) + for j := 0; j < len(nodes); j++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 && n != 3 { + err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n) + return nil, err + } + + ip, err := rd.ReadString() + if err != nil { + return nil, err + } + + port, err := rd.ReadString() + if err != nil { + return nil, err + } + + nodes[j].Addr = net.JoinHostPort(ip, port) + + if n == 3 { + id, err := rd.ReadString() + if err != nil { + return nil, err + } + nodes[j].ID = id + } + } + + cmd.val[i] = ClusterSlot{ + Start: int(start), + End: int(end), + Nodes: nodes, + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +// GeoLocation is used with GeoAdd to add geospatial location. +type GeoLocation struct { + Name string + Longitude, Latitude, Dist float64 + GeoHash int64 +} + +// GeoRadiusQuery is used with GeoRadius to query geospatial index. +type GeoRadiusQuery struct { + Radius float64 + // Can be m, km, ft, or mi. Default is km. + Unit string + WithCoord bool + WithDist bool + WithGeoHash bool + Count int + // Can be ASC or DESC. Default is no sort order. 
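	// Editorial example, not part of the upstream diff: a hedged sketch of how a
	// query built from these fields is issued (geoLocationArgs below does the
	// translation into command arguments); rdb, ctx and the key name are
	// illustrative assumptions:
	//
	//	locs, err := rdb.GeoRadius(ctx, "points", 15.08, 37.50, &redis.GeoRadiusQuery{
	//		Radius:    200,
	//		Unit:      "km",
	//		WithCoord: true,
	//		WithDist:  true,
	//		Count:     10,
	//		Sort:      "ASC",
	//	}).Result()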
+ Sort string + Store string + StoreDist string +} + +type GeoLocationCmd struct { + baseCmd + + q *GeoRadiusQuery + locations []GeoLocation +} + +var _ Cmder = (*GeoLocationCmd)(nil) + +func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd { + return &GeoLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: geoLocationArgs(q, args...), + }, + q: q, + } +} + +func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} { + args = append(args, q.Radius) + if q.Unit != "" { + args = append(args, q.Unit) + } else { + args = append(args, "km") + } + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithGeoHash { + args = append(args, "withhash") + } + if q.Count > 0 { + args = append(args, "count", q.Count) + } + if q.Sort != "" { + args = append(args, q.Sort) + } + if q.Store != "" { + args = append(args, "store") + args = append(args, q.Store) + } + if q.StoreDist != "" { + args = append(args, "storedist") + args = append(args, q.StoreDist) + } + return args +} + +func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) { + cmd.locations = locations +} + +func (cmd *GeoLocationCmd) Val() []GeoLocation { + return cmd.locations +} + +func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) { + return cmd.locations, cmd.err +} + +func (cmd *GeoLocationCmd) String() string { + return cmdString(cmd, cmd.locations) +} + +func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error { + v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q)) + if err != nil { + return err + } + cmd.locations = v.([]GeoLocation) + return nil +} + +func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + locs := make([]GeoLocation, 0, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(newGeoLocationParser(q)) + if err != nil { + return nil, err + } + switch vv := v.(type) { + case string: + locs = append(locs, GeoLocation{ + Name: vv, + }) + case *GeoLocation: + // TODO: avoid copying + locs = append(locs, *vv) + default: + return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v) + } + } + return locs, nil + } +} + +func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse { + return func(rd *proto.Reader, n int64) (interface{}, error) { + var loc GeoLocation + var err error + + loc.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + if q.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + if q.WithGeoHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return nil, err + } + } + if q.WithCoord { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n != 2 { + return nil, fmt.Errorf("got %d coordinates, expected 2", n) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return nil, err + } + } + + return &loc, nil + } +} + +//------------------------------------------------------------------------------ + +// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query. +type GeoSearchQuery struct { + Member string + + // Latitude and Longitude when using FromLonLat option. + Longitude float64 + Latitude float64 + + // Distance and unit when using ByRadius option. + // Can use m, km, ft, or mi. Default is km. 
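	// Editorial example, not part of the upstream diff: a hedged sketch of a
	// radius-style GEOSEARCH built from these fields (see geoSearchArgs below);
	// rdb, ctx and the key name are illustrative assumptions:
	//
	//	names, err := rdb.GeoSearch(ctx, "points", &redis.GeoSearchQuery{
	//		Longitude:  15.08,
	//		Latitude:   37.50,
	//		Radius:     200,
	//		RadiusUnit: "km",
	//		Sort:       "ASC",
	//	}).Result()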
+ Radius float64 + RadiusUnit string + + // Height, width and unit when using ByBox option. + // Can be m, km, ft, or mi. Default is km. + BoxWidth float64 + BoxHeight float64 + BoxUnit string + + // Can be ASC or DESC. Default is no sort order. + Sort string + Count int + CountAny bool +} + +type GeoSearchLocationQuery struct { + GeoSearchQuery + + WithCoord bool + WithDist bool + WithHash bool +} + +type GeoSearchStoreQuery struct { + GeoSearchQuery + + // When using the StoreDist option, the command stores the items in a + // sorted set populated with their distance from the center of the circle or box, + // as a floating-point number, in the same unit specified for that shape. + StoreDist bool +} + +func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} { + args = geoSearchArgs(&q.GeoSearchQuery, args) + + if q.WithCoord { + args = append(args, "withcoord") + } + if q.WithDist { + args = append(args, "withdist") + } + if q.WithHash { + args = append(args, "withhash") + } + + return args +} + +func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} { + if q.Member != "" { + args = append(args, "frommember", q.Member) + } else { + args = append(args, "fromlonlat", q.Longitude, q.Latitude) + } + + if q.Radius > 0 { + if q.RadiusUnit == "" { + q.RadiusUnit = "km" + } + args = append(args, "byradius", q.Radius, q.RadiusUnit) + } else { + if q.BoxUnit == "" { + q.BoxUnit = "km" + } + args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit) + } + + if q.Sort != "" { + args = append(args, q.Sort) + } + + if q.Count > 0 { + args = append(args, "count", q.Count) + if q.CountAny { + args = append(args, "any") + } + } + + return args +} + +type GeoSearchLocationCmd struct { + baseCmd + + opt *GeoSearchLocationQuery + val []GeoLocation +} + +var _ Cmder = (*GeoSearchLocationCmd)(nil) + +func NewGeoSearchLocationCmd( + ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{}, +) *GeoSearchLocationCmd { + return &GeoSearchLocationCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + opt: opt, + } +} + +func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) { + cmd.val = val +} + +func (cmd *GeoSearchLocationCmd) Val() []GeoLocation { + return cmd.val +} + +func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) { + return cmd.val, cmd.err +} + +func (cmd *GeoSearchLocationCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + cmd.val = make([]GeoLocation, n) + for i := 0; i < n; i++ { + _, err = rd.ReadArrayLen() + if err != nil { + return err + } + + var loc GeoLocation + + loc.Name, err = rd.ReadString() + if err != nil { + return err + } + if cmd.opt.WithDist { + loc.Dist, err = rd.ReadFloatReply() + if err != nil { + return err + } + } + if cmd.opt.WithHash { + loc.GeoHash, err = rd.ReadIntReply() + if err != nil { + return err + } + } + if cmd.opt.WithCoord { + nn, err := rd.ReadArrayLen() + if err != nil { + return err + } + if nn != 2 { + return fmt.Errorf("got %d coordinates, expected 2", nn) + } + + loc.Longitude, err = rd.ReadFloatReply() + if err != nil { + return err + } + loc.Latitude, err = rd.ReadFloatReply() + if err != nil { + return err + } + } + + cmd.val[i] = loc + } + + return nil +} + +//------------------------------------------------------------------------------ + +type GeoPos struct { + Longitude, Latitude float64 +} + +type GeoPosCmd struct { + 
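	// Editorial note, not part of the upstream diff: GeoSearchLocationCmd above
	// decodes one sub-array per match, with the optional fields present only
	// when the matching With* flag was sent. A hedged, illustrative call; rdb,
	// ctx and the key name are assumptions:
	//
	//	locs, err := rdb.GeoSearchLocation(ctx, "points", &redis.GeoSearchLocationQuery{
	//		GeoSearchQuery: redis.GeoSearchQuery{
	//			Longitude:  15.08,
	//			Latitude:   37.50,
	//			Radius:     200,
	//			RadiusUnit: "km",
	//		},
	//		WithCoord: true,
	//		WithDist:  true,
	//	}).Result()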
baseCmd + + val []*GeoPos +} + +var _ Cmder = (*GeoPosCmd)(nil) + +func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd { + return &GeoPosCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *GeoPosCmd) SetVal(val []*GeoPos) { + cmd.val = val +} + +func (cmd *GeoPosCmd) Val() []*GeoPos { + return cmd.val +} + +func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *GeoPosCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]*GeoPos, n) + for i := 0; i < len(cmd.val); i++ { + i := i + _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + longitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + latitude, err := rd.ReadFloatReply() + if err != nil { + return nil, err + } + + cmd.val[i] = &GeoPos{ + Longitude: longitude, + Latitude: latitude, + } + return nil, nil + }) + if err != nil { + if err == Nil { + cmd.val[i] = nil + continue + } + return nil, err + } + } + return nil, nil + }) + return err +} + +//------------------------------------------------------------------------------ + +type CommandInfo struct { + Name string + Arity int8 + Flags []string + ACLFlags []string + FirstKeyPos int8 + LastKeyPos int8 + StepCount int8 + ReadOnly bool +} + +type CommandsInfoCmd struct { + baseCmd + + val map[string]*CommandInfo +} + +var _ Cmder = (*CommandsInfoCmd)(nil) + +func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd { + return &CommandsInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) { + cmd.val = val +} + +func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo { + return cmd.val +} + +func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *CommandsInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make(map[string]*CommandInfo, n) + for i := int64(0); i < n; i++ { + v, err := rd.ReadReply(commandInfoParser) + if err != nil { + return nil, err + } + vv := v.(*CommandInfo) + cmd.val[vv.Name] = vv + } + return nil, nil + }) + return err +} + +func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) { + const numArgRedis5 = 6 + const numArgRedis6 = 7 + + switch n { + case numArgRedis5, numArgRedis6: + // continue + default: + return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n) + } + + var cmd CommandInfo + var err error + + cmd.Name, err = rd.ReadString() + if err != nil { + return nil, err + } + + arity, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.Arity = int8(arity) + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.Flags = make([]string, n) + for i := 0; i < len(cmd.Flags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.Flags[i] = "" + case err != nil: + return nil, err + default: + cmd.Flags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + firstKeyPos, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.FirstKeyPos = int8(firstKeyPos) + + lastKeyPos, err := 
rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.LastKeyPos = int8(lastKeyPos) + + stepCount, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + cmd.StepCount = int8(stepCount) + + for _, flag := range cmd.Flags { + if flag == "readonly" { + cmd.ReadOnly = true + break + } + } + + if n == numArgRedis5 { + return &cmd, nil + } + + _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.ACLFlags = make([]string, n) + for i := 0; i < len(cmd.ACLFlags); i++ { + switch s, err := rd.ReadString(); { + case err == Nil: + cmd.ACLFlags[i] = "" + case err != nil: + return nil, err + default: + cmd.ACLFlags[i] = s + } + } + return nil, nil + }) + if err != nil { + return nil, err + } + + return &cmd, nil +} + +//------------------------------------------------------------------------------ + +type cmdsInfoCache struct { + fn func(ctx context.Context) (map[string]*CommandInfo, error) + + once internal.Once + cmds map[string]*CommandInfo +} + +func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache { + return &cmdsInfoCache{ + fn: fn, + } +} + +func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) { + err := c.once.Do(func() error { + cmds, err := c.fn(ctx) + if err != nil { + return err + } + + // Extensions have cmd names in upper case. Convert them to lower case. + for k, v := range cmds { + lower := internal.ToLower(k) + if lower != k { + cmds[lower] = v + } + } + + c.cmds = cmds + return nil + }) + return c.cmds, err +} + +//------------------------------------------------------------------------------ + +type SlowLog struct { + ID int64 + Time time.Time + Duration time.Duration + Args []string + // These are also optional fields emitted only by Redis 4.0 or greater: + // https://redis.io/commands/slowlog#output-format + ClientAddr string + ClientName string +} + +type SlowLogCmd struct { + baseCmd + + val []SlowLog +} + +var _ Cmder = (*SlowLogCmd)(nil) + +func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd { + return &SlowLogCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *SlowLogCmd) SetVal(val []SlowLog) { + cmd.val = val +} + +func (cmd *SlowLogCmd) Val() []SlowLog { + return cmd.val +} + +func (cmd *SlowLogCmd) Result() ([]SlowLog, error) { + return cmd.Val(), cmd.Err() +} + +func (cmd *SlowLogCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error { + _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) { + cmd.val = make([]SlowLog, n) + for i := 0; i < len(cmd.val); i++ { + n, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if n < 4 { + err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n) + return nil, err + } + + id, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + + createdAt, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + createdAtTime := time.Unix(createdAt, 0) + + costs, err := rd.ReadIntReply() + if err != nil { + return nil, err + } + costsDuration := time.Duration(costs) * time.Microsecond + + cmdLen, err := rd.ReadArrayLen() + if err != nil { + return nil, err + } + if cmdLen < 1 { + err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen) + return nil, err + } + + cmdString := make([]string, cmdLen) + for i := 0; i < cmdLen; i++ { + cmdString[i], err = rd.ReadString() + if err != nil { 
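				// Editorial note, not part of the upstream diff: entries decoded
				// here are typically fetched with SlowLogGet; a hedged sketch,
				// with rdb and ctx assumed:
				//
				//	logs, err := rdb.SlowLogGet(ctx, 10).Result()
				//	for _, l := range logs {
				//		fmt.Println(l.ID, l.Time, l.Duration, l.Args)
				//	}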
+ return nil, err + } + } + + var address, name string + for i := 4; i < n; i++ { + str, err := rd.ReadString() + if err != nil { + return nil, err + } + if i == 4 { + address = str + } else if i == 5 { + name = str + } + } + + cmd.val[i] = SlowLog{ + ID: id, + Time: createdAtTime, + Duration: costsDuration, + Args: cmdString, + ClientAddr: address, + ClientName: name, + } + } + return nil, nil + }) + return err +} diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go new file mode 100644 index 0000000000..bbfe089df1 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/commands.go @@ -0,0 +1,3475 @@ +package redis + +import ( + "context" + "errors" + "io" + "time" + + "github.com/go-redis/redis/v8/internal" +) + +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +// For example: +// +// rdb.Set(ctx, key, value, redis.KeepTTL) +const KeepTTL = -1 + +func usePrecise(dur time.Duration) bool { + return dur < time.Second || dur%time.Second != 0 +} + +func formatMs(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Millisecond { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1ms", + dur, time.Millisecond, + ) + return 1 + } + return int64(dur / time.Millisecond) +} + +func formatSec(ctx context.Context, dur time.Duration) int64 { + if dur > 0 && dur < time.Second { + internal.Logger.Printf( + ctx, + "specified duration is %s, but minimal supported value is %s - truncating to 1s", + dur, time.Second, + ) + return 1 + } + return int64(dur / time.Second) +} + +func appendArgs(dst, src []interface{}) []interface{} { + if len(src) == 1 { + return appendArg(dst, src[0]) + } + + dst = append(dst, src...) + return dst +} + +func appendArg(dst []interface{}, arg interface{}) []interface{} { + switch arg := arg.(type) { + case []string: + for _, s := range arg { + dst = append(dst, s) + } + return dst + case []interface{}: + dst = append(dst, arg...) 
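		// Editorial note, not part of the upstream diff: this flattening is what
		// lets variadic commands such as MSet accept composite values. Each of
		// these hedged, illustrative calls produces the same wire arguments:
		//
		//	rdb.MSet(ctx, "k1", "v1", "k2", "v2")
		//	rdb.MSet(ctx, []string{"k1", "v1", "k2", "v2"})
		//	rdb.MSet(ctx, map[string]interface{}{"k1": "v1", "k2": "v2"})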
+ return dst + case map[string]interface{}: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + case map[string]string: + for k, v := range arg { + dst = append(dst, k, v) + } + return dst + default: + return append(dst, arg) + } +} + +type Cmdable interface { + Pipeline() Pipeliner + Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + + TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) + TxPipeline() Pipeliner + + Command(ctx context.Context) *CommandsInfoCmd + ClientGetName(ctx context.Context) *StringCmd + Echo(ctx context.Context, message interface{}) *StringCmd + Ping(ctx context.Context) *StatusCmd + Quit(ctx context.Context) *StatusCmd + Del(ctx context.Context, keys ...string) *IntCmd + Unlink(ctx context.Context, keys ...string) *IntCmd + Dump(ctx context.Context, key string) *StringCmd + Exists(ctx context.Context, keys ...string) *IntCmd + Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd + Keys(ctx context.Context, pattern string) *StringSliceCmd + Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd + Move(ctx context.Context, key string, db int) *BoolCmd + ObjectRefCount(ctx context.Context, key string) *IntCmd + ObjectEncoding(ctx context.Context, key string) *StringCmd + ObjectIdleTime(ctx context.Context, key string) *DurationCmd + Persist(ctx context.Context, key string) *BoolCmd + PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd + PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd + PTTL(ctx context.Context, key string) *DurationCmd + RandomKey(ctx context.Context) *StringCmd + Rename(ctx context.Context, key, newkey string) *StatusCmd + RenameNX(ctx context.Context, key, newkey string) *BoolCmd + Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd + Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd + SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd + SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd + Touch(ctx context.Context, keys ...string) *IntCmd + TTL(ctx context.Context, key string) *DurationCmd + Type(ctx context.Context, key string) *StatusCmd + Append(ctx context.Context, key, value string) *IntCmd + Decr(ctx context.Context, key string) *IntCmd + DecrBy(ctx context.Context, key string, decrement int64) *IntCmd + Get(ctx context.Context, key string) *StringCmd + GetRange(ctx context.Context, key string, start, end int64) *StringCmd + GetSet(ctx context.Context, key string, value interface{}) *StringCmd + GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd + GetDel(ctx context.Context, key string) *StringCmd + Incr(ctx context.Context, key string) *IntCmd + IncrBy(ctx context.Context, key string, value int64) *IntCmd + IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd + MGet(ctx context.Context, keys ...string) *SliceCmd + MSet(ctx context.Context, values ...interface{}) *StatusCmd + 
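	// Editorial note, not part of the upstream diff: SetNX/SetXX below return a
	// *BoolCmd because the server replies nil (nothing written) or OK; see
	// BoolCmd.readReply in command.go above. A hedged, illustrative lock-style
	// call, with rdb and ctx assumed:
	//
	//	ok, err := rdb.SetNX(ctx, "lock:job42", "1", 10*time.Second).Result()
	//	// ok == false means the key already existed and nothing was set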
MSetNX(ctx context.Context, values ...interface{}) *BoolCmd + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd + // TODO: rename to SetEx + SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd + SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd + SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd + StrLen(ctx context.Context, key string) *IntCmd + Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd + + GetBit(ctx context.Context, key string, offset int64) *IntCmd + SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd + BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd + BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd + BitOpNot(ctx context.Context, destKey string, key string) *IntCmd + BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd + BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd + + Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd + ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd + SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd + + HDel(ctx context.Context, key string, fields ...string) *IntCmd + HExists(ctx context.Context, key, field string) *BoolCmd + HGet(ctx context.Context, key, field string) *StringCmd + HGetAll(ctx context.Context, key string) *StringStringMapCmd + HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd + HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd + HKeys(ctx context.Context, key string) *StringSliceCmd + HLen(ctx context.Context, key string) *IntCmd + HMGet(ctx context.Context, key string, fields ...string) *SliceCmd + HSet(ctx context.Context, key string, values ...interface{}) *IntCmd + HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd + HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd + HVals(ctx context.Context, key string) *StringSliceCmd + HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd + + BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd + BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd + LIndex(ctx context.Context, key string, index int64) *StringCmd + LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd + LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd + LLen(ctx context.Context, key string) *IntCmd + LPop(ctx context.Context, key string) *StringCmd + LPopCount(ctx context.Context, key string, count int) 
*StringSliceCmd + LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd + LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd + LPush(ctx context.Context, key string, values ...interface{}) *IntCmd + LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd + LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd + LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd + RPop(ctx context.Context, key string) *StringCmd + RPopCount(ctx context.Context, key string, count int) *StringSliceCmd + RPopLPush(ctx context.Context, source, destination string) *StringCmd + RPush(ctx context.Context, key string, values ...interface{}) *IntCmd + RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd + LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd + BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd + + SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd + SCard(ctx context.Context, key string) *IntCmd + SDiff(ctx context.Context, keys ...string) *StringSliceCmd + SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + SInter(ctx context.Context, keys ...string) *StringSliceCmd + SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd + SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd + SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd + SMembers(ctx context.Context, key string) *StringSliceCmd + SMembersMap(ctx context.Context, key string) *StringStructMapCmd + SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd + SPop(ctx context.Context, key string) *StringCmd + SPopN(ctx context.Context, key string, count int64) *StringSliceCmd + SRandMember(ctx context.Context, key string) *StringCmd + SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd + SRem(ctx context.Context, key string, members ...interface{}) *IntCmd + SUnion(ctx context.Context, keys ...string) *StringSliceCmd + SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd + + XAdd(ctx context.Context, a *XAddArgs) *StringCmd + XDel(ctx context.Context, stream string, ids ...string) *IntCmd + XLen(ctx context.Context, stream string) *IntCmd + XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd + XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd + XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd + XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd + XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd + XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd + XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd + XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd + XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd + XGroupDestroy(ctx context.Context, stream, group string) *IntCmd + XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd + 
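+
+	// A typical consumer-group read loop built from the commands above
+	// (a sketch; the group, consumer and stream names are illustrative):
+	//
+	//	for {
+	//		res, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
+	//			Group:    "mygroup",
+	//			Consumer: "consumer-1",
+	//			Streams:  []string{"mystream", ">"},
+	//			Block:    time.Second,
+	//		}).Result()
+	//		if err != nil && err != redis.Nil {
+	//			return err
+	//		}
+	//		for _, stream := range res {
+	//			for _, msg := range stream.Messages {
+	//				// process msg.Values, then acknowledge:
+	//				rdb.XAck(ctx, "mystream", "mygroup", msg.ID)
+	//			}
+	//		}
+	//	}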
XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd + XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd + XPending(ctx context.Context, stream, group string) *XPendingCmd + XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd + XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd + XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd + XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd + XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd + + // TODO: XTrim and XTrimApprox remove in v9. + XTrim(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd + XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd + XTrimMinID(ctx context.Context, key string, minID string) *IntCmd + XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd + XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd + XInfoStream(ctx context.Context, key string) *XInfoStreamCmd + XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd + XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd + + BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd + + // TODO: remove + // ZAddCh + // ZIncr + // ZAddNXCh + // ZAddXXCh + // ZIncrNX + // ZIncrXX + // in v9. + // use ZAddArgs and ZAddArgsIncr. + + ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd + ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd + ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd + ZIncr(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd + ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd + ZCard(ctx context.Context, key string) *IntCmd + ZCount(ctx context.Context, key, min, max string) *IntCmd + ZLexCount(ctx context.Context, key, min, max string) *IntCmd + ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd + ZInter(ctx context.Context, store *ZStore) *StringSliceCmd + ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd + ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd + ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd + ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd + ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd + ZRangeArgsWithScores(ctx 
context.Context, z ZRangeArgs) *ZSliceCmd + ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd + ZRank(ctx context.Context, key, member string) *IntCmd + ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd + ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd + ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd + ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd + ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd + ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd + ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd + ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd + ZRevRank(ctx context.Context, key, member string) *IntCmd + ZScore(ctx context.Context, key, member string) *FloatCmd + ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd + ZUnion(ctx context.Context, store ZStore) *StringSliceCmd + ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd + ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd + ZDiff(ctx context.Context, keys ...string) *StringSliceCmd + ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd + ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd + + PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd + PFCount(ctx context.Context, keys ...string) *IntCmd + PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd + + BgRewriteAOF(ctx context.Context) *StatusCmd + BgSave(ctx context.Context) *StatusCmd + ClientKill(ctx context.Context, ipPort string) *StatusCmd + ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd + ClientList(ctx context.Context) *StringCmd + ClientPause(ctx context.Context, dur time.Duration) *BoolCmd + ClientID(ctx context.Context) *IntCmd + ConfigGet(ctx context.Context, parameter string) *SliceCmd + ConfigResetStat(ctx context.Context) *StatusCmd + ConfigSet(ctx context.Context, parameter, value string) *StatusCmd + ConfigRewrite(ctx context.Context) *StatusCmd + DBSize(ctx context.Context) *IntCmd + FlushAll(ctx context.Context) *StatusCmd + FlushAllAsync(ctx context.Context) *StatusCmd + FlushDB(ctx context.Context) *StatusCmd + FlushDBAsync(ctx context.Context) *StatusCmd + Info(ctx context.Context, section ...string) *StringCmd + LastSave(ctx context.Context) *IntCmd + Save(ctx context.Context) *StatusCmd + Shutdown(ctx context.Context) *StatusCmd + ShutdownSave(ctx context.Context) *StatusCmd + ShutdownNoSave(ctx context.Context) *StatusCmd + SlaveOf(ctx context.Context, host, port string) *StatusCmd + Time(ctx context.Context) *TimeCmd + DebugObject(ctx context.Context, key string) *StringCmd + ReadOnly(ctx context.Context) *StatusCmd + ReadWrite(ctx context.Context) *StatusCmd + MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd + + Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd + ScriptFlush(ctx context.Context) *StatusCmd + ScriptKill(ctx context.Context) *StatusCmd + ScriptLoad(ctx context.Context, script string) *StringCmd + + Publish(ctx context.Context, channel string, message interface{}) *IntCmd + 
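+	// Publish returns the number of subscribers the message was delivered to,
+	// e.g. (sketch): n, err := rdb.Publish(ctx, "mychannel", "payload").Result()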
PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd + PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd + PubSubNumPat(ctx context.Context) *IntCmd + + ClusterSlots(ctx context.Context) *ClusterSlotsCmd + ClusterNodes(ctx context.Context) *StringCmd + ClusterMeet(ctx context.Context, host, port string) *StatusCmd + ClusterForget(ctx context.Context, nodeID string) *StatusCmd + ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd + ClusterResetSoft(ctx context.Context) *StatusCmd + ClusterResetHard(ctx context.Context) *StatusCmd + ClusterInfo(ctx context.Context) *StringCmd + ClusterKeySlot(ctx context.Context, key string) *IntCmd + ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd + ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd + ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd + ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd + ClusterSaveConfig(ctx context.Context) *StatusCmd + ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd + ClusterFailover(ctx context.Context) *StatusCmd + ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd + ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd + + GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd + GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd + GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd + GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd + GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd + GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd + GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd + GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd + GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd + GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd +} + +type StatefulCmdable interface { + Cmdable + Auth(ctx context.Context, password string) *StatusCmd + AuthACL(ctx context.Context, username, password string) *StatusCmd + Select(ctx context.Context, index int) *StatusCmd + SwapDB(ctx context.Context, index1, index2 int) *StatusCmd + ClientSetName(ctx context.Context, name string) *BoolCmd +} + +var ( + _ Cmdable = (*Client)(nil) + _ Cmdable = (*Tx)(nil) + _ Cmdable = (*Ring)(nil) + _ Cmdable = (*ClusterClient)(nil) +) + +type cmdable func(ctx context.Context, cmd Cmder) error + +type statefulCmdable func(ctx context.Context, cmd Cmder) error + +//------------------------------------------------------------------------------ + +func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", password) + _ = c(ctx, cmd) + return cmd +} + +// AuthACL Perform an AUTH command, using the given user and pass. +// Should be used to authenticate the current connection with one of the connections defined in the ACL list +// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system. 
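+// For example (a sketch; the user name and password are placeholders):
+//
+//	conn.AuthACL(ctx, "myuser", "mypassword")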
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd { + cmd := NewStatusCmd(ctx, "auth", username, password) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd { + cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd { + cmd := NewStatusCmd(ctx, "select", index) + _ = c(ctx, cmd) + return cmd +} + +func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd { + cmd := NewStatusCmd(ctx, "swapdb", index1, index2) + _ = c(ctx, cmd) + return cmd +} + +// ClientSetName assigns a name to the connection. +func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd { + cmd := NewBoolCmd(ctx, "client", "setname", name) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd { + cmd := NewCommandsInfoCmd(ctx, "command") + _ = c(ctx, cmd) + return cmd +} + +// ClientGetName returns the name of the connection. +func (c cmdable) ClientGetName(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "client", "getname") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "echo", message) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Ping(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "ping") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Quit(_ context.Context) *StatusCmd { + panic("not implemented") +} + +func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "del" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unlink" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Dump(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "dump", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "exists" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) 
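+	// The integer reply counts existing keys; a key repeated in keys is
+	// counted once per mention.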
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "") +} + +func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "NX") +} + +func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "XX") +} + +func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "GT") +} + +func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + return c.expire(ctx, key, expiration, "LT") +} + +func (c cmdable) expire( + ctx context.Context, key string, expiration time.Duration, mode string, +) *BoolCmd { + args := make([]interface{}, 3, 4) + args[0] = "expire" + args[1] = key + args[2] = formatSec(ctx, expiration) + if mode != "" { + args = append(args, mode) + } + + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix()) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "keys", pattern) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "migrate", + host, + port, + key, + db, + formatMs(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd { + cmd := NewBoolCmd(ctx, "move", key, db) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "object", "refcount", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "object", "encoding", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd { + cmd := NewBoolCmd(ctx, "persist", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd { + cmd := NewBoolCmd( + ctx, + "pexpireat", + key, + tm.UnixNano()/int64(time.Millisecond), + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RandomKey(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "randomkey") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd { + cmd := NewStatusCmd(ctx, "rename", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd { + cmd := NewBoolCmd(ctx, 
"renamenx", key, newkey) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "restore", + key, + formatMs(ctx, ttl), + value, + "replace", + ) + _ = c(ctx, cmd) + return cmd +} + +type Sort struct { + By string + Offset, Count int64 + Get []string + Order string + Alpha bool +} + +func (sort *Sort) args(key string) []interface{} { + args := []interface{}{"sort", key} + if sort.By != "" { + args = append(args, "by", sort.By) + } + if sort.Offset != 0 || sort.Count != 0 { + args = append(args, "limit", sort.Offset, sort.Count) + } + for _, get := range sort.Get { + args = append(args, "get", get) + } + if sort.Order != "" { + args = append(args, sort.Order) + } + if sort.Alpha { + args = append(args, "alpha") + } + return args +} + +func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd { + args := sort.args(key) + if store != "" { + args = append(args, "store", store) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd { + cmd := NewSliceCmd(ctx, sort.args(key)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, len(keys)+1) + args[0] = "touch" + for i, key := range keys { + args[i+1] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd { + cmd := NewDurationCmd(ctx, time.Second, "ttl", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Type(ctx context.Context, key string) *StatusCmd { + cmd := NewStatusCmd(ctx, "type", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd { + cmd := NewIntCmd(ctx, "append", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Decr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "decr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd { + cmd := NewIntCmd(ctx, "decrby", key, decrement) + _ = c(ctx, cmd) + return cmd +} + +// Get Redis `GET key` command. It returns redis.Nil error when key does not exist. +func (c cmdable) Get(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "get", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd { + cmd := NewStringCmd(ctx, "getrange", key, start, end) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd { + cmd := NewStringCmd(ctx, "getset", key, value) + _ = c(ctx, cmd) + return cmd +} + +// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist). +// Requires Redis >= 6.2.0. 
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd { + args := make([]interface{}, 0, 4) + args = append(args, "getex", key) + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == 0 { + args = append(args, "persist") + } + + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// GetDel redis-server version >= 6.2.0. +func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "getdel", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Incr(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "incr", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd { + cmd := NewIntCmd(ctx, "incrby", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "incrbyfloat", key, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "mget" + for i, key := range keys { + args[1+i] = key + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSet is like Set but accepts multiple values: +// - MSet("key1", "value1", "key2", "value2") +// - MSet([]string{"key1", "value1", "key2", "value2"}) +// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "mset" + args = appendArgs(args, values) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// MSetNX is like SetNX but accepts multiple values: +// - MSetNX("key1", "value1", "key2", "value2") +// - MSetNX([]string{"key1", "value1", "key2", "value2"}) +// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"}) +func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd { + args := make([]interface{}, 1, 1+len(values)) + args[0] = "msetnx" + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Set Redis `SET key value [expiration]` command. +// Use expiration for `SETEX`-like behavior. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + args := make([]interface{}, 3, 5) + args[0] = "set" + args[1] = key + args[2] = value + if expiration > 0 { + if usePrecise(expiration) { + args = append(args, "px", formatMs(ctx, expiration)) + } else { + args = append(args, "ex", formatSec(ctx, expiration)) + } + } else if expiration == KeepTTL { + args = append(args, "keepttl") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetArgs provides arguments for the SetArgs function. +type SetArgs struct { + // Mode can be `NX` or `XX` or empty. + Mode string + + // Zero `TTL` or `Expiration` means that the key has no expiration time. 
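+	// TTL is sent as EX/PX and ExpireAt as EXAT; they are alternatives and
+	// should not be set together.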
+ TTL time.Duration + ExpireAt time.Time + + // When Get is true, the command returns the old value stored at key, or nil when key did not exist. + Get bool + + // KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, + // otherwise you will receive an error: (error) ERR syntax error. + KeepTTL bool +} + +// SetArgs supports all the options that the SET command supports. +// It is the alternative to the Set function when you want +// to have more control over the options. +func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd { + args := []interface{}{"set", key, value} + + if a.KeepTTL { + args = append(args, "keepttl") + } + + if !a.ExpireAt.IsZero() { + args = append(args, "exat", a.ExpireAt.Unix()) + } + if a.TTL > 0 { + if usePrecise(a.TTL) { + args = append(args, "px", formatMs(ctx, a.TTL)) + } else { + args = append(args, "ex", formatSec(ctx, a.TTL)) + } + } + + if a.Mode != "" { + args = append(args, a.Mode) + } + + if a.Get { + args = append(args, "get") + } + + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SetEX Redis `SETEX key expiration value` command. +func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd { + cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value) + _ = c(ctx, cmd) + return cmd +} + +// SetNX Redis `SET key value [expiration] NX` command. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. +func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + switch expiration { + case 0: + // Use old `SETNX` to support old Redis versions. + cmd = NewBoolCmd(ctx, "setnx", key, value) + case KeepTTL: + cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx") + default: + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx") + } + } + + _ = c(ctx, cmd) + return cmd +} + +// SetXX Redis `SET key value [expiration] XX` command. +// +// Zero expiration means the key has no expiration time. +// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0, +// otherwise you will receive an error: (error) ERR syntax error. 
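+// For example (a sketch):
+//
+//	ok, err := rdb.SetXX(ctx, "key", "value", 10*time.Second).Result() // SET key value ex 10 xx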
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd { + var cmd *BoolCmd + switch expiration { + case 0: + cmd = NewBoolCmd(ctx, "set", key, value, "xx") + case KeepTTL: + cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx") + default: + if usePrecise(expiration) { + cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx") + } else { + cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx") + } + } + + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd { + cmd := NewIntCmd(ctx, "setrange", key, offset, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "strlen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd { + args := []interface{}{"copy", sourceKey, destKey, "DB", db} + if replace { + args = append(args, "REPLACE") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd { + cmd := NewIntCmd(ctx, "getbit", key, offset) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd { + cmd := NewIntCmd( + ctx, + "setbit", + key, + offset, + value, + ) + _ = c(ctx, cmd) + return cmd +} + +type BitCount struct { + Start, End int64 +} + +func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd { + args := []interface{}{"bitcount", key} + if bitCount != nil { + args = append( + args, + bitCount.Start, + bitCount.End, + ) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "bitop" + args[1] = op + args[2] = destKey + for i, key := range keys { + args[3+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "and", destKey, keys...) +} + +func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "or", destKey, keys...) +} + +func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd { + return c.bitOp(ctx, "xor", destKey, keys...) +} + +func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd { + return c.bitOp(ctx, "not", destKey, key) +} + +func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd { + args := make([]interface{}, 3+len(pos)) + args[0] = "bitpos" + args[1] = key + args[2] = bit + switch len(pos) { + case 0: + case 1: + args[3] = pos[0] + case 2: + args[3] = pos[0] + args[4] = pos[1] + default: + panic("too many arguments") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd { + a := make([]interface{}, 0, 2+len(args)) + a = append(a, "bitfield") + a = append(a, key) + a = append(a, args...) + cmd := NewIntSliceCmd(ctx, a...) 
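+	// The extra args are passed through verbatim, e.g. (sketch)
+	// BitField(ctx, "key", "incrby", "u8", 10, 1, "get", "u8", 0)
+	// issues BITFIELD key INCRBY u8 10 1 GET u8 0.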
+ _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd { + args := []interface{}{"scan", cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + if keyType != "" { + args = append(args, "type", keyType) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"sscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"hscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd { + args := []interface{}{"zscan", key, cursor} + if match != "" { + args = append(args, "match", match) + } + if count > 0 { + args = append(args, "count", count) + } + cmd := NewScanCmd(ctx, c, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hdel" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd { + cmd := NewBoolCmd(ctx, "hexists", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd { + cmd := NewStringCmd(ctx, "hget", key, field) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd { + cmd := NewStringStringMapCmd(ctx, "hgetall", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd { + cmd := NewIntCmd(ctx, "hincrby", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd { + cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hkeys", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "hlen", key) + _ = c(ctx, cmd) + return cmd +} + +// HMGet returns the values for the specified fields in the hash stored at key. 
+// It returns an interface{} to distinguish between empty string and nil value. +func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd { + args := make([]interface{}, 2+len(fields)) + args[0] = "hmget" + args[1] = key + for i, field := range fields { + args[2+i] = field + } + cmd := NewSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HSet accepts values in following formats: +// - HSet("myhash", "key1", "value1", "key2", "value2") +// - HSet("myhash", []string{"key1", "value1", "key2", "value2"}) +// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}) +// +// Note that it requires Redis v4 for multiple field/value pairs support. +func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hset" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// HMSet is a deprecated version of HSet left for compatibility with Redis 3. +func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "hmset" + args[1] = key + args = appendArgs(args, values) + cmd := NewBoolCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "hsetnx", key, field, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "hvals", key) + _ = c(ctx, cmd) + return cmd +} + +// HRandField redis-server version >= 6.2.0. +func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd { + args := make([]interface{}, 0, 4) + + // Although count=0 is meaningless, redis accepts count=0. + args = append(args, "hrandfield", key, count) + if withValues { + args = append(args, "withvalues") + } + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "blpop" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "brpop" + for i, key := range keys { + args[1+i] = key + } + args[len(keys)+1] = formatSec(ctx, timeout) + cmd := NewStringSliceCmd(ctx, args...) 
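+	// The blocking timeout is the last command argument; setReadTimeout below
+	// stretches the connection read deadline so the client waits at least as
+	// long as the server-side block.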
+ cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd { + cmd := NewStringCmd( + ctx, + "brpoplpush", + source, + destination, + formatSec(ctx, timeout), + ) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd { + cmd := NewStringCmd(ctx, "lindex", key, index) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LLen(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "llen", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "lpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "lpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +type LPosArgs struct { + Rank, MaxLen int64 +} + +func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd { + args := []interface{}{"lpos", key, value} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd { + args := []interface{}{"lpos", key, value, "count", count} + if a.Rank != 0 { + args = append(args, "rank", a.Rank) + } + if a.MaxLen != 0 { + args = append(args, "maxlen", a.MaxLen) + } + cmd := NewIntSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "lpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd( + ctx, + "lrange", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "lrem", key, count, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "lset", key, index, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd { + cmd := NewStatusCmd( + ctx, + "ltrim", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "rpop", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "rpop", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd { + cmd := NewStringCmd(ctx, "rpoplpush", source, destination) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpush" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(values)) + args[0] = "rpushx" + args[1] = key + args = appendArgs(args, values) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd { + cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BLMove( + ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration, +) *StringCmd { + cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout)) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "sadd" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "scard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sdiff" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sdiffstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sinter" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sinterstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "sismember", key, member) + _ = c(ctx, cmd) + return cmd +} + +// SMIsMember Redis `SMISMEMBER key member [member ...]` command. +func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "smismember" + args[1] = key + args = appendArgs(args, members) + cmd := NewBoolSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// SMembers Redis `SMEMBERS key` command output as a slice. +func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +// SMembersMap Redis `SMEMBERS key` command output as a map. +func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd { + cmd := NewStringStructMapCmd(ctx, "smembers", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd { + cmd := NewBoolCmd(ctx, "smove", source, destination, member) + _ = c(ctx, cmd) + return cmd +} + +// SPop Redis `SPOP key` command. +func (c cmdable) SPop(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "spop", key) + _ = c(ctx, cmd) + return cmd +} + +// SPopN Redis `SPOP key count` command. +func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "spop", key, count) + _ = c(ctx, cmd) + return cmd +} + +// SRandMember Redis `SRANDMEMBER key` command. +func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "srandmember", key) + _ = c(ctx, cmd) + return cmd +} + +// SRandMemberN Redis `SRANDMEMBER key count` command. +func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "srandmember", key, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "srem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "sunion" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "sunionstore" + args[1] = destination + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+	Stream     string
+	NoMkStream bool
+	MaxLen     int64 // MAXLEN N
+
+	// Deprecated: use MaxLen+Approx, remove in v9.
+	MaxLenApprox int64 // MAXLEN ~ N
+
+	MinID string
+	// Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+	Approx bool
+	Limit  int64
+	ID     string
+	Values interface{}
+}
+
+// XAdd adds an entry to a stream. Note: a.Limit is affected by a known
+// redis-server bug; confirm the behavior on your server version before using it.
+// issue: https://github.com/redis/redis/issues/9046
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+	args := make([]interface{}, 0, 11)
+	args = append(args, "xadd", a.Stream)
+	if a.NoMkStream {
+		args = append(args, "nomkstream")
+	}
+	switch {
+	case a.MaxLen > 0:
+		if a.Approx {
+			args = append(args, "maxlen", "~", a.MaxLen)
+		} else {
+			args = append(args, "maxlen", a.MaxLen)
+		}
+	case a.MaxLenApprox > 0:
+		// TODO remove in v9.
+		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	case a.MinID != "":
+		if a.Approx {
+			args = append(args, "minid", "~", a.MinID)
+		} else {
+			args = append(args, "minid", a.MinID)
+		}
+	}
+	if a.Limit > 0 {
+		args = append(args, "limit", a.Limit)
+	}
+	if a.ID != "" {
+		args = append(args, a.ID)
+	} else {
+		args = append(args, "*")
+	}
+	args = appendArg(args, a.Values)
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xlen", stream)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadArgs struct {
+	Streams []string // list of streams and ids, e.g.
stream1 stream2 id1 id2 + Count int64 + Block time.Duration +} + +func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 6+len(a.Streams)) + args = append(args, "xread") + + keyPos := int8(1) + if a.Count > 0 { + args = append(args, "count") + args = append(args, a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block") + args = append(args, int64(a.Block/time.Millisecond)) + keyPos += 2 + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd { + return c.XRead(ctx, &XReadArgs{ + Streams: streams, + Block: -1, + }) +} + +func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd { + cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd { + cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer) + _ = c(ctx, cmd) + return cmd +} + +type XReadGroupArgs struct { + Group string + Consumer string + Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2 + Count int64 + Block time.Duration + NoAck bool +} + +func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd { + args := make([]interface{}, 0, 10+len(a.Streams)) + args = append(args, "xreadgroup", "group", a.Group, a.Consumer) + + keyPos := int8(4) + if a.Count > 0 { + args = append(args, "count", a.Count) + keyPos += 2 + } + if a.Block >= 0 { + args = append(args, "block", int64(a.Block/time.Millisecond)) + keyPos += 2 + } + if a.NoAck { + args = append(args, "noack") + keyPos++ + } + args = append(args, "streams") + keyPos++ + for _, s := range a.Streams { + args = append(args, s) + } + + cmd := NewXStreamSliceCmd(ctx, args...) + if a.Block >= 0 { + cmd.setReadTimeout(a.Block) + } + cmd.SetFirstKeyPos(keyPos) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd { + args := []interface{}{"xack", stream, group} + for _, id := range ids { + args = append(args, id) + } + cmd := NewIntCmd(ctx, args...) 
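+	// The reply is the number of messages actually acknowledged, i.e. removed
+	// from the consumer group's pending entries list.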
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd { + cmd := NewXPendingCmd(ctx, "xpending", stream, group) + _ = c(ctx, cmd) + return cmd +} + +type XPendingExtArgs struct { + Stream string + Group string + Idle time.Duration + Start string + End string + Count int64 + Consumer string +} + +func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd { + args := make([]interface{}, 0, 9) + args = append(args, "xpending", a.Stream, a.Group) + if a.Idle != 0 { + args = append(args, "idle", formatMs(ctx, a.Idle)) + } + args = append(args, a.Start, a.End, a.Count) + if a.Consumer != "" { + args = append(args, a.Consumer) + } + cmd := NewXPendingExtCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type XAutoClaimArgs struct { + Stream string + Group string + MinIdle time.Duration + Start string + Count int64 + Consumer string +} + +func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd { + args := xAutoClaimArgs(ctx, a) + cmd := NewXAutoClaimCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd { + args := xAutoClaimArgs(ctx, a) + args = append(args, "justid") + cmd := NewXAutoClaimJustIDCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} { + args := make([]interface{}, 0, 8) + args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start) + if a.Count > 0 { + args = append(args, "count", a.Count) + } + return args +} + +type XClaimArgs struct { + Stream string + Group string + Consumer string + MinIdle time.Duration + Messages []string +} + +func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd { + args := xClaimArgs(a) + cmd := NewXMessageSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd { + args := xClaimArgs(a) + args = append(args, "justid") + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func xClaimArgs(a *XClaimArgs) []interface{} { + args := make([]interface{}, 0, 5+len(a.Messages)) + args = append(args, + "xclaim", + a.Stream, + a.Group, a.Consumer, + int64(a.MinIdle/time.Millisecond)) + for _, id := range a.Messages { + args = append(args, id) + } + return args +} + +// xTrim If approx is true, add the "~" parameter, otherwise it is the default "=" (redis default). +// example: +// XTRIM key MAXLEN/MINID threshold LIMIT limit. +// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit. +// The redis-server version is lower than 6.2, please set limit to 0. +func (c cmdable) xTrim( + ctx context.Context, key, strategy string, + approx bool, threshold interface{}, limit int64, +) *IntCmd { + args := make([]interface{}, 0, 7) + args = append(args, "xtrim", key, strategy) + if approx { + args = append(args, "~") + } + args = append(args, threshold) + if limit > 0 { + args = append(args, "limit", limit) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// Deprecated: use XTrimMaxLen, remove in v9. +func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd { + return c.xTrim(ctx, key, "maxlen", false, maxLen, 0) +} + +// Deprecated: use XTrimMaxLenApprox, remove in v9. 
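+
+// A hypothetical sketch of the pending-entry helpers above (rdb as in the
+// earlier sketches; stream "mystream", group "g1" and consumer "c1" are
+// illustrative names):
+//
+//	pending, err := rdb.XPendingExt(ctx, &XPendingExtArgs{
+//		Stream: "mystream", Group: "g1",
+//		Start: "-", End: "+", Count: 10,
+//	}).Result()
+//	msgs, start, err := rdb.XAutoClaim(ctx, &XAutoClaimArgs{
+//		Stream: "mystream", Group: "g1", Consumer: "c1",
+//		MinIdle: 30 * time.Second, Start: "0-0",
+//	}).Result()
+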
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen performs an exact trim (no `~` modifier); `limit` cannot be used with it.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox trims approximately. Note that LIMIT is affected by a known
+// server-side bug; verify the behavior on your server before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID performs an exact trim (no `~` modifier); `limit` cannot be used with it.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox trims approximately. Note that LIMIT is affected by a known
+// server-side bug; verify the behavior on your server before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+ cmd := NewXInfoGroupsCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+ cmd := NewXInfoStreamCmd(ctx, key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member together with the name of the key
+// from which it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+ Keys []string
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
+
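+// A hypothetical trimming sketch (rdb as in the earlier sketches):
+//
+//	rdb.XTrimMaxLen(ctx, "mystream", 1000)            // XTRIM mystream MAXLEN 1000
+//	rdb.XTrimMaxLenApprox(ctx, "mystream", 1000, 100) // XTRIM mystream MAXLEN ~ 1000 LIMIT 100
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.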
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmax" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command. +func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd { + args := make([]interface{}, 1+len(keys)+1) + args[0] = "bzpopmin" + for i, key := range keys { + args[1+i] = key + } + args[len(args)-1] = formatSec(ctx, timeout) + cmd := NewZWithKeyCmd(ctx, args...) + cmd.setReadTimeout(timeout) + _ = c(ctx, cmd) + return cmd +} + +// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive. +type ZAddArgs struct { + NX bool + XX bool + LT bool + GT bool + Ch bool + Members []Z +} + +func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} { + a := make([]interface{}, 0, 6+2*len(args.Members)) + a = append(a, "zadd", key) + + // The GT, LT and NX options are mutually exclusive. + if args.NX { + a = append(a, "nx") + } else { + if args.XX { + a = append(a, "xx") + } + if args.GT { + a = append(a, "gt") + } else if args.LT { + a = append(a, "lt") + } + } + if args.Ch { + a = append(a, "ch") + } + if incr { + a = append(a, "incr") + } + for _, m := range args.Members { + a = append(a, m.Score) + a = append(a, m.Member) + } + return a +} + +func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd { + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd { + cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...) + _ = c(ctx, cmd) + return cmd +} + +// TODO: Compatible with v8 api, will be removed in v9. +func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd { + args.Members = make([]Z, len(members)) + for i, m := range members { + args.Members[i] = *m + } + cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...) + _ = c(ctx, cmd) + return cmd +} + +// ZAdd Redis `ZADD key score member [score member ...]` command. +func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{}, members...) +} + +// ZAddNX Redis `ZADD key NX score member [score member ...]` command. +func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + NX: true, + }, members...) +} + +// ZAddXX Redis `ZADD key XX score member [score member ...]` command. +func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + XX: true, + }, members...) +} + +// ZAddCh Redis `ZADD key CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// Ch: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + Ch: true, + }, members...) +} + +// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// NX: true, +// Ch: true, +// Members: []Z, +// }) +// remove in v9. 
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + NX: true, + Ch: true, + }, members...) +} + +// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command. +// Deprecated: Use +// client.ZAddArgs(ctx, ZAddArgs{ +// XX: true, +// Ch: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd { + return c.zAdd(ctx, key, ZAddArgs{ + XX: true, + Ch: true, + }, members...) +} + +// ZIncr Redis `ZADD key INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + Members: []Z{*member}, + }) +} + +// ZIncrNX Redis `ZADD key NX INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// NX: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + NX: true, + Members: []Z{*member}, + }) +} + +// ZIncrXX Redis `ZADD key XX INCR score member` command. +// Deprecated: Use +// client.ZAddArgsIncr(ctx, ZAddArgs{ +// XX: true, +// Members: []Z, +// }) +// remove in v9. +func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd { + return c.ZAddArgsIncr(ctx, key, ZAddArgs{ + XX: true, + Members: []Z{*member}, + }) +} + +func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "zcard", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zlexcount", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zincrby", key, increment, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinterstore", destination, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zinter", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "zmscore" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewFloatSliceCmd(ctx, args...) 
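+
+ // The ZAddArgs form above replaces the deprecated ZAdd*Ch/ZIncr* helpers;
+ // a hypothetical sketch (rdb as in the earlier sketches):
+ //
+ //	rdb.ZAddArgs(ctx, "leaderboard", ZAddArgs{
+ //		GT:      true, // update only when the new score is greater
+ //		Ch:      true, // count changed members, not only added ones
+ //		Members: []Z{{Score: 42, Member: "alice"}},
+ //	})
+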
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmax", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd { + args := []interface{}{ + "zpopmin", + key, + } + + switch len(count) { + case 0: + break + case 1: + args = append(args, count[0]) + default: + panic("too many arguments") + } + + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ZRangeArgs is all the options of the ZRange command. +// In version> 6.2.0, you can replace the(cmd): +// ZREVRANGE, +// ZRANGEBYSCORE, +// ZREVRANGEBYSCORE, +// ZRANGEBYLEX, +// ZREVRANGEBYLEX. +// Please pay attention to your redis-server version. +// +// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher. +type ZRangeArgs struct { + Key string + + // When the ByScore option is provided, the open interval(exclusive) can be set. + // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive). + // It is similar to the deprecated(6.2.0+) ZRangeByScore command. + // For example: + // ZRangeArgs{ + // Key: "example-key", + // Start: "(3", + // Stop: 8, + // ByScore: true, + // } + // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8). + // + // For the ByLex option, it is similar to the deprecated(6.2.0+) ZRangeByLex command. + // You can set the <Start> and <Stop> options as follows: + // ZRangeArgs{ + // Key: "example-key", + // Start: "[abc", + // Stop: "(def", + // ByLex: true, + // } + // cmd: "ZRange example-key [abc (def ByLex" + // + // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int). + // You can read the documentation for more information: https://redis.io/commands/zrange + Start interface{} + Stop interface{} + + // The ByScore and ByLex options are mutually exclusive. + ByScore bool + ByLex bool + + Rev bool + + // limit offset count. + Offset int64 + Count int64 +} + +func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} { + // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>. + if z.Rev && (z.ByScore || z.ByLex) { + args = append(args, z.Key, z.Stop, z.Start) + } else { + args = append(args, z.Key, z.Start, z.Stop) + } + + if z.ByScore { + args = append(args, "byscore") + } else if z.ByLex { + args = append(args, "bylex") + } + if z.Rev { + args = append(args, "rev") + } + if z.Offset != 0 || z.Count != 0 { + args = append(args, "limit", z.Offset, z.Count) + } + return args +} + +func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd { + args := make([]interface{}, 0, 9) + args = append(args, "zrange") + args = z.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd { + args := make([]interface{}, 0, 10) + args = append(args, "zrange") + args = z.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) 
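+
+ // A hypothetical ZRangeArgs sketch (rdb as in the earlier sketches;
+ // ByScore/ByLex, Rev and Limit need redis-server >= 6.2.0):
+ //
+ //	vals, err := rdb.ZRangeArgs(ctx, ZRangeArgs{
+ //		Key:     "zset",
+ //		Start:   "(3", // exclusive lower bound
+ //		Stop:    8,
+ //		ByScore: true,
+ //		Rev:     true, // appendArgs swaps Start and Stop for REV queries
+ //		Offset:  0,
+ //		Count:   10,
+ //	}).Result()
+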
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + return c.ZRangeArgs(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + return c.ZRangeArgsWithScores(ctx, ZRangeArgs{ + Key: key, + Start: start, + Stop: stop, + }) +} + +type ZRangeBy struct { + Min, Max string + Offset, Count int64 +} + +func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Min, opt.Max} + if withScores { + args = append(args, "withscores") + } + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebyscore", key, opt, false) +} + +func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRangeBy(ctx, "zrangebylex", key, opt, false) +} + +func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd { + args := make([]interface{}, 0, 10) + args = append(args, "zrangestore", dst) + args = z.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(members)) + args[0] = "zrem" + args[1] = key + args = appendArgs(args, members) + cmd := NewIntCmd(ctx, args...) 
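+
+ // ZRangeBy bounds use the Redis score syntax; a hypothetical sketch
+ // (rdb as in the earlier sketches):
+ //
+ //	vals, err := rdb.ZRangeByScore(ctx, "zset", &ZRangeBy{
+ //		Min:   "-inf",
+ //		Max:   "(5", // "(" marks an exclusive bound
+ //		Count: 100,
+ //	}).Result()
+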
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd { + cmd := NewIntCmd( + ctx, + "zremrangebyrank", + key, + start, + stop, + ) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd { + cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd { + cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd { + args := []interface{}{zcmd, key, opt.Max, opt.Min} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt) +} + +func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd { + return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt) +} + +func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd { + args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"} + if opt.Offset != 0 || opt.Count != 0 { + args = append( + args, + "limit", + opt.Offset, + opt.Count, + ) + } + cmd := NewZSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd { + cmd := NewIntCmd(ctx, "zrevrank", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd { + cmd := NewFloatCmd(ctx, "zscore", key, member) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd { + args := make([]interface{}, 0, 2+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunion", len(store.Keys)) + args = store.appendArgs(args) + args = append(args, "withscores") + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd { + args := make([]interface{}, 0, 3+store.len()) + args = append(args, "zunionstore", dest, len(store.Keys)) + args = store.appendArgs(args) + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +// ZRandMember redis-server version >= 6.2.0. 
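+
+// A hypothetical ZUnionStore sketch (rdb as in the earlier sketches):
+//
+//	n, err := rdb.ZUnionStore(ctx, "dst", &ZStore{
+//		Keys:      []string{"zset1", "zset2"},
+//		Weights:   []float64{2, 1},
+//		Aggregate: "MAX",
+//	}).Result()
+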
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd { + args := make([]interface{}, 0, 4) + + // Although count=0 is meaningless, redis accepts count=0. + args = append(args, "zrandmember", key, count) + if withScores { + args = append(args, "withscores") + } + + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// ZDiff redis-server version >= 6.2.0. +func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "zdiff" + args[1] = len(keys) + for i, key := range keys { + args[i+2] = key + } + + cmd := NewStringSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +// ZDiffWithScores redis-server version >= 6.2.0. +func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd { + args := make([]interface{}, 3+len(keys)) + args[0] = "zdiff" + args[1] = len(keys) + for i, key := range keys { + args[i+2] = key + } + args[len(keys)+2] = "withscores" + + cmd := NewZSliceCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +// ZDiffStore redis-server version >=6.2.0. +func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd { + args := make([]interface{}, 0, 3+len(keys)) + args = append(args, "zdiffstore", destination, len(keys)) + for _, key := range keys { + args = append(args, key) + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd { + args := make([]interface{}, 2, 2+len(els)) + args[0] = "pfadd" + args[1] = key + args = appendArgs(args, els) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "pfcount" + for i, key := range keys { + args[1+i] = key + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "pfmerge" + args[1] = dest + for i, key := range keys { + args[2+i] = key + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "bgrewriteaof") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) BgSave(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "bgsave") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd { + cmd := NewStatusCmd(ctx, "client", "kill", ipPort) + _ = c(ctx, cmd) + return cmd +} + +// ClientKillByFilter is new style syntax, while the ClientKill is old +// +// CLIENT KILL <option> [value] ... <option> [value] +func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd { + args := make([]interface{}, 2+len(keys)) + args[0] = "client" + args[1] = "kill" + for i, key := range keys { + args[2+i] = key + } + cmd := NewIntCmd(ctx, args...) 
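+
+ // A hypothetical HyperLogLog sketch for the PF commands above (rdb as in
+ // the earlier sketches):
+ //
+ //	rdb.PFAdd(ctx, "hll", "a", "b", "c")
+ //	n, err := rdb.PFCount(ctx, "hll").Result() // approximate distinct count
+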
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientList(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "client", "list") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd { + cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur)) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientID(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "client", "id") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd { + cmd := NewIntCmd(ctx, "client", "unblock", id) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd { + cmd := NewIntCmd(ctx, "client", "unblock", id, "error") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd { + cmd := NewSliceCmd(ctx, "config", "get", parameter) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "config", "resetstat") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd { + cmd := NewStatusCmd(ctx, "config", "set", parameter, value) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "config", "rewrite") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DBSize(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "dbsize") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) FlushAll(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "flushall") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "flushall", "async") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) FlushDB(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "flushdb") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "flushdb", "async") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd { + args := []interface{}{"info"} + if len(section) > 0 { + args = append(args, section[0]) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) LastSave(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "lastsave") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Save(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "save") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd { + var args []interface{} + if modifier == "" { + args = []interface{}{"shutdown"} + } else { + args = []interface{}{"shutdown", modifier} + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + if err := cmd.Err(); err != nil { + if err == io.EOF { + // Server quit as expected. + cmd.err = nil + } + } else { + // Server did not quit. String reply contains the reason. 
+ cmd.err = errors.New(cmd.val) + cmd.val = "" + } + return cmd +} + +func (c cmdable) Shutdown(ctx context.Context) *StatusCmd { + return c.shutdown(ctx, "") +} + +func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd { + return c.shutdown(ctx, "save") +} + +func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd { + return c.shutdown(ctx, "nosave") +} + +func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd { + cmd := NewStatusCmd(ctx, "slaveof", host, port) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd { + cmd := NewSlowLogCmd(context.Background(), "slowlog", "get", num) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) Sync(_ context.Context) { + panic("not implemented") +} + +func (c cmdable) Time(ctx context.Context) *TimeCmd { + cmd := NewTimeCmd(ctx, "time") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd { + cmd := NewStringCmd(ctx, "debug", "object", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "readonly") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "readwrite") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd { + args := []interface{}{"memory", "usage", key} + if len(samples) > 0 { + if len(samples) != 1 { + panic("MemoryUsage expects single sample count") + } + args = append(args, "SAMPLES", samples[0]) + } + cmd := NewIntCmd(ctx, args...) + cmd.SetFirstKeyPos(2) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd { + cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args)) + cmdArgs[0] = "eval" + cmdArgs[1] = script + cmdArgs[2] = len(keys) + for i, key := range keys { + cmdArgs[3+i] = key + } + cmdArgs = appendArgs(cmdArgs, args) + cmd := NewCmd(ctx, cmdArgs...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd { + cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args)) + cmdArgs[0] = "evalsha" + cmdArgs[1] = sha1 + cmdArgs[2] = len(keys) + for i, key := range keys { + cmdArgs[3+i] = key + } + cmdArgs = appendArgs(cmdArgs, args) + cmd := NewCmd(ctx, cmdArgs...) + cmd.SetFirstKeyPos(3) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd { + args := make([]interface{}, 2+len(hashes)) + args[0] = "script" + args[1] = "exists" + for i, hash := range hashes { + args[2+i] = hash + } + cmd := NewBoolSliceCmd(ctx, args...) 
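+
+ // A hypothetical scripting sketch (rdb as in the earlier sketches):
+ // load a script once, then invoke it by SHA.
+ //
+ //	script := "return redis.call('GET', KEYS[1])"
+ //	sha, err := rdb.ScriptLoad(ctx, script).Result()
+ //	val, err := rdb.EvalSha(ctx, sha, []string{"mykey"}).Result()
+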
+ _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "script", "flush") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "script", "kill") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd { + cmd := NewStringCmd(ctx, "script", "load", script) + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +// Publish posts the message to the channel. +func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd { + cmd := NewIntCmd(ctx, "publish", channel, message) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd { + args := []interface{}{"pubsub", "channels"} + if pattern != "*" { + args = append(args, pattern) + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd { + args := make([]interface{}, 2+len(channels)) + args[0] = "pubsub" + args[1] = "numsub" + for i, channel := range channels { + args[2+i] = channel + } + cmd := NewStringIntMapCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd { + cmd := NewIntCmd(ctx, "pubsub", "numpat") + _ = c(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd { + cmd := NewClusterSlotsCmd(ctx, "cluster", "slots") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "cluster", "nodes") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "meet", host, port) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "reset", "soft") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "reset", "hard") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "cluster", "info") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd { + cmd := NewIntCmd(ctx, "cluster", "keyslot", key) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd { + cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, 
slot int) *IntCmd { + cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd { + args := make([]interface{}, 2+len(slots)) + args[0] = "cluster" + args[1] = "delslots" + for i, slot := range slots { + args[2+i] = slot + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd { + size := max - min + 1 + slots := make([]int, size) + for i := 0; i < size; i++ { + slots[i] = min + i + } + return c.ClusterDelSlots(ctx, slots...) +} + +func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "saveconfig") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "cluster", "failover") + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd { + args := make([]interface{}, 2+len(slots)) + args[0] = "cluster" + args[1] = "addslots" + for i, num := range slots { + args[2+i] = num + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd { + size := max - min + 1 + slots := make([]int, size) + for i := 0; i < size; i++ { + slots[i] = min + i + } + return c.ClusterAddSlots(ctx, slots...) +} + +//------------------------------------------------------------------------------ + +func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd { + args := make([]interface{}, 2+3*len(geoLocation)) + args[0] = "geoadd" + args[1] = key + for i, eachLoc := range geoLocation { + args[2+3*i] = eachLoc.Longitude + args[2+3*i+1] = eachLoc.Latitude + args[2+3*i+2] = eachLoc.Name + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// GeoRadius is a read-only GEORADIUS_RO command. +func (c cmdable) GeoRadius( + ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery, +) *GeoLocationCmd { + cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude) + if query.Store != "" || query.StoreDist != "" { + cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist")) + return cmd + } + _ = c(ctx, cmd) + return cmd +} + +// GeoRadiusStore is a writing GEORADIUS command. +func (c cmdable) GeoRadiusStore( + ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery, +) *IntCmd { + args := geoLocationArgs(query, "georadius", key, longitude, latitude) + cmd := NewIntCmd(ctx, args...) + if query.Store == "" && query.StoreDist == "" { + cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist")) + return cmd + } + _ = c(ctx, cmd) + return cmd +} + +// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command. 
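+// Like GeoRadius above, it rejects Store and StoreDist queries. A hypothetical
+// sketch using GeoRadius (rdb as in the earlier sketches):
+//
+//	locs, err := rdb.GeoRadius(ctx, "geo", 13.36, 38.11, &GeoRadiusQuery{
+//		Radius:    200,
+//		Unit:      "km",
+//		WithCoord: true,
+//	}).Result()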
+func (c cmdable) GeoRadiusByMember( + ctx context.Context, key, member string, query *GeoRadiusQuery, +) *GeoLocationCmd { + cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member) + if query.Store != "" || query.StoreDist != "" { + cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist")) + return cmd + } + _ = c(ctx, cmd) + return cmd +} + +// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command. +func (c cmdable) GeoRadiusByMemberStore( + ctx context.Context, key, member string, query *GeoRadiusQuery, +) *IntCmd { + args := geoLocationArgs(query, "georadiusbymember", key, member) + cmd := NewIntCmd(ctx, args...) + if query.Store == "" && query.StoreDist == "" { + cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist")) + return cmd + } + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd { + args := make([]interface{}, 0, 13) + args = append(args, "geosearch", key) + args = geoSearchArgs(q, args) + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GeoSearchLocation( + ctx context.Context, key string, q *GeoSearchLocationQuery, +) *GeoSearchLocationCmd { + args := make([]interface{}, 0, 16) + args = append(args, "geosearch", key) + args = geoSearchLocationArgs(q, args) + cmd := NewGeoSearchLocationCmd(ctx, q, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd { + args := make([]interface{}, 0, 15) + args = append(args, "geosearchstore", store, key) + args = geoSearchArgs(&q.GeoSearchQuery, args) + if q.StoreDist { + args = append(args, "storedist") + } + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GeoDist( + ctx context.Context, key string, member1, member2, unit string, +) *FloatCmd { + if unit == "" { + unit = "km" + } + cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "geohash" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewStringSliceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd { + args := make([]interface{}, 2+len(members)) + args[0] = "geopos" + args[1] = key + for i, member := range members { + args[2+i] = member + } + cmd := NewGeoPosCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} diff --git a/vendor/github.com/go-redis/redis/v8/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go new file mode 100644 index 0000000000..55262533a6 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/doc.go @@ -0,0 +1,4 @@ +/* +Package redis implements a Redis client. +*/ +package redis diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go new file mode 100644 index 0000000000..521594bbd0 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/error.go @@ -0,0 +1,144 @@ +package redis + +import ( + "context" + "io" + "net" + "strings" + + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" +) + +// ErrClosed performs any operation on the closed client will return this error. 
+var ErrClosed = pool.ErrClosed + +type Error interface { + error + + // RedisError is a no-op function but + // serves to distinguish types that are Redis + // errors from ordinary errors: a type is a + // Redis error if it has a RedisError method. + RedisError() +} + +var _ Error = proto.RedisError("") + +func shouldRetry(err error, retryTimeout bool) bool { + switch err { + case io.EOF, io.ErrUnexpectedEOF: + return true + case nil, context.Canceled, context.DeadlineExceeded: + return false + } + + if v, ok := err.(timeoutError); ok { + if v.Timeout() { + return retryTimeout + } + return true + } + + s := err.Error() + if s == "ERR max number of clients reached" { + return true + } + if strings.HasPrefix(s, "LOADING ") { + return true + } + if strings.HasPrefix(s, "READONLY ") { + return true + } + if strings.HasPrefix(s, "CLUSTERDOWN ") { + return true + } + if strings.HasPrefix(s, "TRYAGAIN ") { + return true + } + + return false +} + +func isRedisError(err error) bool { + _, ok := err.(proto.RedisError) + return ok +} + +func isBadConn(err error, allowTimeout bool, addr string) bool { + switch err { + case nil: + return false + case context.Canceled, context.DeadlineExceeded: + return true + } + + if isRedisError(err) { + switch { + case isReadOnlyError(err): + // Close connections in read only state in case domain addr is used + // and domain resolves to a different Redis Server. See #790. + return true + case isMovedSameConnAddr(err, addr): + // Close connections when we are asked to move to the same addr + // of the connection. Force a DNS resolution when all connections + // of the pool are recycled + return true + default: + return false + } + } + + if allowTimeout { + if netErr, ok := err.(net.Error); ok && netErr.Timeout() { + return !netErr.Temporary() + } + } + + return true +} + +func isMovedError(err error) (moved bool, ask bool, addr string) { + if !isRedisError(err) { + return + } + + s := err.Error() + switch { + case strings.HasPrefix(s, "MOVED "): + moved = true + case strings.HasPrefix(s, "ASK "): + ask = true + default: + return + } + + ind := strings.LastIndex(s, " ") + if ind == -1 { + return false, false, "" + } + addr = s[ind+1:] + return +} + +func isLoadingError(err error) bool { + return strings.HasPrefix(err.Error(), "LOADING ") +} + +func isReadOnlyError(err error) bool { + return strings.HasPrefix(err.Error(), "READONLY ") +} + +func isMovedSameConnAddr(err error, addr string) bool { + redisError := err.Error() + if !strings.HasPrefix(redisError, "MOVED ") { + return false + } + return strings.HasSuffix(redisError, " "+addr) +} + +//------------------------------------------------------------------------------ + +type timeoutError interface { + Timeout() bool +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go new file mode 100644 index 0000000000..b97fa0d685 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/arg.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "strconv" + "time" +) + +func AppendArg(b []byte, v interface{}) []byte { + switch v := v.(type) { + case nil: + return append(b, "<nil>"...) 
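+ // For illustration, the cases below yield, e.g., AppendArg(nil, 42) ->
+ // "42", AppendArg(nil, true) -> "true", and time.Time values are
+ // formatted as RFC3339Nano.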
+ case string: + return appendUTF8String(b, Bytes(v)) + case []byte: + return appendUTF8String(b, v) + case int: + return strconv.AppendInt(b, int64(v), 10) + case int8: + return strconv.AppendInt(b, int64(v), 10) + case int16: + return strconv.AppendInt(b, int64(v), 10) + case int32: + return strconv.AppendInt(b, int64(v), 10) + case int64: + return strconv.AppendInt(b, v, 10) + case uint: + return strconv.AppendUint(b, uint64(v), 10) + case uint8: + return strconv.AppendUint(b, uint64(v), 10) + case uint16: + return strconv.AppendUint(b, uint64(v), 10) + case uint32: + return strconv.AppendUint(b, uint64(v), 10) + case uint64: + return strconv.AppendUint(b, v, 10) + case float32: + return strconv.AppendFloat(b, float64(v), 'f', -1, 64) + case float64: + return strconv.AppendFloat(b, v, 'f', -1, 64) + case bool: + if v { + return append(b, "true"...) + } + return append(b, "false"...) + case time.Time: + return v.AppendFormat(b, time.RFC3339Nano) + default: + return append(b, fmt.Sprint(v)...) + } +} + +func appendUTF8String(dst []byte, src []byte) []byte { + dst = append(dst, src...) + return dst +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go new file mode 100644 index 0000000000..b3a4f211e3 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go @@ -0,0 +1,78 @@ +package hashtag + +import ( + "strings" + + "github.com/go-redis/redis/v8/internal/rand" +) + +const slotNumber = 16384 + +// CRC16 implementation according to CCITT standards. +// Copyright 2001-2010 Georges Menie (www.menie.org) +// Copyright 2013 The Go Authors. All rights reserved. +// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 
0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, +} + +func Key(key string) string { + if s := strings.IndexByte(key, '{'); s > -1 { + if e := strings.IndexByte(key[s+1:], '}'); e > 0 { + return key[s+1 : s+e+1] + } + } + return key +} + +func RandomSlot() int { + return rand.Intn(slotNumber) +} + +// Slot returns a consistent slot number between 0 and 16383 +// for any given string key. +func Slot(key string) int { + if key == "" { + return RandomSlot() + } + key = Key(key) + return int(crc16sum(key)) % slotNumber +} + +func crc16sum(key string) (crc uint16) { + for i := 0; i < len(key); i++ { + crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff] + } + return +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make b/vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make new file mode 100644 index 0000000000..ada1b318a8 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make @@ -0,0 +1,11 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS(hashtag.go) + +GO_TEST_SRCS(hashtag_test.go) + +END() + +RECURSE(gotest) diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go new file mode 100644 index 0000000000..852c8bd525 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go @@ -0,0 +1,201 @@ +package hscan + +import ( + "errors" + "fmt" + "reflect" + "strconv" +) + +// decoderFunc represents decoding functions for default built-in types. +type decoderFunc func(reflect.Value, string) error + +var ( + // List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1). + decoders = []decoderFunc{ + reflect.Bool: decodeBool, + reflect.Int: decodeInt, + reflect.Int8: decodeInt8, + reflect.Int16: decodeInt16, + reflect.Int32: decodeInt32, + reflect.Int64: decodeInt64, + reflect.Uint: decodeUint, + reflect.Uint8: decodeUint8, + reflect.Uint16: decodeUint16, + reflect.Uint32: decodeUint32, + reflect.Uint64: decodeUint64, + reflect.Float32: decodeFloat32, + reflect.Float64: decodeFloat64, + reflect.Complex64: decodeUnsupported, + reflect.Complex128: decodeUnsupported, + reflect.Array: decodeUnsupported, + reflect.Chan: decodeUnsupported, + reflect.Func: decodeUnsupported, + reflect.Interface: decodeUnsupported, + reflect.Map: decodeUnsupported, + reflect.Ptr: decodeUnsupported, + reflect.Slice: decodeSlice, + reflect.String: decodeString, + reflect.Struct: decodeUnsupported, + reflect.UnsafePointer: decodeUnsupported, + } + + // Global map of struct field specs that is populated once for every new + // struct type that is scanned. This caches the field types and the corresponding + // decoder functions to avoid iterating through struct fields on subsequent scans. + globalStructMap = newStructMap() +) + +func Struct(dst interface{}) (StructValue, error) { + v := reflect.ValueOf(dst) + + // The destination to scan into should be a struct pointer. 
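+ // For illustration (hypothetical destinations):
+ //
+ //	var u struct{ Name string `redis:"name"` }
+ //	Struct(&u) // ok: non-nil pointer to a struct
+ //	Struct(u)  // error: redis.Scan(non-pointer ...)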
+ if v.Kind() != reflect.Ptr || v.IsNil() { + return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst) + } + + v = v.Elem() + if v.Kind() != reflect.Struct { + return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst) + } + + return StructValue{ + spec: globalStructMap.get(v.Type()), + value: v, + }, nil +} + +// Scan scans the results from a key-value Redis map result set to a destination struct. +// The Redis keys are matched to the struct's field with the `redis` tag. +func Scan(dst interface{}, keys []interface{}, vals []interface{}) error { + if len(keys) != len(vals) { + return errors.New("args should have the same number of keys and vals") + } + + strct, err := Struct(dst) + if err != nil { + return err + } + + // Iterate through the (key, value) sequence. + for i := 0; i < len(vals); i++ { + key, ok := keys[i].(string) + if !ok { + continue + } + + val, ok := vals[i].(string) + if !ok { + continue + } + + if err := strct.Scan(key, val); err != nil { + return err + } + } + + return nil +} + +func decodeBool(f reflect.Value, s string) error { + b, err := strconv.ParseBool(s) + if err != nil { + return err + } + f.SetBool(b) + return nil +} + +func decodeInt8(f reflect.Value, s string) error { + return decodeNumber(f, s, 8) +} + +func decodeInt16(f reflect.Value, s string) error { + return decodeNumber(f, s, 16) +} + +func decodeInt32(f reflect.Value, s string) error { + return decodeNumber(f, s, 32) +} + +func decodeInt64(f reflect.Value, s string) error { + return decodeNumber(f, s, 64) +} + +func decodeInt(f reflect.Value, s string) error { + return decodeNumber(f, s, 0) +} + +func decodeNumber(f reflect.Value, s string, bitSize int) error { + v, err := strconv.ParseInt(s, 10, bitSize) + if err != nil { + return err + } + f.SetInt(v) + return nil +} + +func decodeUint8(f reflect.Value, s string) error { + return decodeUnsignedNumber(f, s, 8) +} + +func decodeUint16(f reflect.Value, s string) error { + return decodeUnsignedNumber(f, s, 16) +} + +func decodeUint32(f reflect.Value, s string) error { + return decodeUnsignedNumber(f, s, 32) +} + +func decodeUint64(f reflect.Value, s string) error { + return decodeUnsignedNumber(f, s, 64) +} + +func decodeUint(f reflect.Value, s string) error { + return decodeUnsignedNumber(f, s, 0) +} + +func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error { + v, err := strconv.ParseUint(s, 10, bitSize) + if err != nil { + return err + } + f.SetUint(v) + return nil +} + +func decodeFloat32(f reflect.Value, s string) error { + v, err := strconv.ParseFloat(s, 32) + if err != nil { + return err + } + f.SetFloat(v) + return nil +} + +// although the default is float64, but we better define it. +func decodeFloat64(f reflect.Value, s string) error { + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return err + } + f.SetFloat(v) + return nil +} + +func decodeString(f reflect.Value, s string) error { + f.SetString(s) + return nil +} + +func decodeSlice(f reflect.Value, s string) error { + // []byte slice ([]uint8). 
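+ // Slices with a non-byte element type are left unset; no error is reported.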
+ if f.Type().Elem().Kind() == reflect.Uint8 { + f.SetBytes([]byte(s)) + } + return nil +} + +func decodeUnsupported(v reflect.Value, s string) error { + return fmt.Errorf("redis.Scan(unsupported %s)", v.Type()) +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go new file mode 100644 index 0000000000..6839412ba2 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go @@ -0,0 +1,93 @@ +package hscan + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// structMap contains the map of struct fields for target structs +// indexed by the struct type. +type structMap struct { + m sync.Map +} + +func newStructMap() *structMap { + return new(structMap) +} + +func (s *structMap) get(t reflect.Type) *structSpec { + if v, ok := s.m.Load(t); ok { + return v.(*structSpec) + } + + spec := newStructSpec(t, "redis") + s.m.Store(t, spec) + return spec +} + +//------------------------------------------------------------------------------ + +// structSpec contains the list of all fields in a target struct. +type structSpec struct { + m map[string]*structField +} + +func (s *structSpec) set(tag string, sf *structField) { + s.m[tag] = sf +} + +func newStructSpec(t reflect.Type, fieldTag string) *structSpec { + numField := t.NumField() + out := &structSpec{ + m: make(map[string]*structField, numField), + } + + for i := 0; i < numField; i++ { + f := t.Field(i) + + tag := f.Tag.Get(fieldTag) + if tag == "" || tag == "-" { + continue + } + + tag = strings.Split(tag, ",")[0] + if tag == "" { + continue + } + + // Use the built-in decoder. + out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]}) + } + + return out +} + +//------------------------------------------------------------------------------ + +// structField represents a single field in a target struct. 
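+// For example, given a hypothetical target struct
+//
+//	type User struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age,omitempty"`
+//	}
+//
+// each tagged field becomes one structField entry, keyed by "name" and "age"
+// (newStructSpec ignores anything after the first comma in a tag).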
+type structField struct { + index int + fn decoderFunc +} + +//------------------------------------------------------------------------------ + +type StructValue struct { + spec *structSpec + value reflect.Value +} + +func (s StructValue) Scan(key string, value string) error { + field, ok := s.spec.m[key] + if !ok { + return nil + } + if err := field.fn(s.value.Field(field.index), value); err != nil { + t := s.value.Type() + return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s", + value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error()) + } + return nil +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make b/vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make new file mode 100644 index 0000000000..b193b03b81 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make @@ -0,0 +1,14 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS( + hscan.go + structmap.go +) + +GO_TEST_SRCS(hscan_test.go) + +END() + +RECURSE(gotest) diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go new file mode 100644 index 0000000000..4a59c599be --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/internal.go @@ -0,0 +1,29 @@ +package internal + +import ( + "time" + + "github.com/go-redis/redis/v8/internal/rand" +) + +func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration { + if retry < 0 { + panic("not reached") + } + if minBackoff == 0 { + return 0 + } + + d := minBackoff << uint(retry) + if d < minBackoff { + return maxBackoff + } + + d = minBackoff + time.Duration(rand.Int63n(int64(d))) + + if d > maxBackoff || d < minBackoff { + d = maxBackoff + } + + return d +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go new file mode 100644 index 0000000000..c8b9213de4 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/log.go @@ -0,0 +1,26 @@ +package internal + +import ( + "context" + "fmt" + "log" + "os" +) + +type Logging interface { + Printf(ctx context.Context, format string, v ...interface{}) +} + +type logger struct { + log *log.Logger +} + +func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) { + _ = l.log.Output(2, fmt.Sprintf(format, v...)) +} + +// Logger calls Output to print to the stderr. +// Arguments are handled in the manner of fmt.Print. +var Logger Logging = &logger{ + log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile), +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go new file mode 100644 index 0000000000..64f46272ae --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/once.go @@ -0,0 +1,60 @@ +/* +Copyright 2014 The Camlistore Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package internal + +import ( + "sync" + "sync/atomic" +) + +// A Once will perform a successful action exactly once. +// +// Unlike a sync.Once, this Once's func returns an error +// and is re-armed on failure. +type Once struct { + m sync.Mutex + done uint32 +} + +// Do calls the function f if and only if Do has not been invoked +// without error for this instance of Once. In other words, given +// var once Once +// if once.Do(f) is called multiple times, only the first call will +// invoke f, even if f has a different value in each invocation unless +// f returns an error. A new instance of Once is required for each +// function to execute. +// +// Do is intended for initialization that must be run exactly once. Since f +// is niladic, it may be necessary to use a function literal to capture the +// arguments to a function to be invoked by Do: +// err := config.once.Do(func() error { return config.init(filename) }) +func (o *Once) Do(f func() error) error { + if atomic.LoadUint32(&o.done) == 1 { + return nil + } + // Slow-path. + o.m.Lock() + defer o.m.Unlock() + var err error + if o.done == 0 { + err = f() + if err == nil { + atomic.StoreUint32(&o.done, 1) + } + } + return err +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go new file mode 100644 index 0000000000..5661659865 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go @@ -0,0 +1,121 @@ +package pool + +import ( + "bufio" + "context" + "net" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8/internal/proto" +) + +var noDeadline = time.Time{} + +type Conn struct { + usedAt int64 // atomic + netConn net.Conn + + rd *proto.Reader + bw *bufio.Writer + wr *proto.Writer + + Inited bool + pooled bool + createdAt time.Time +} + +func NewConn(netConn net.Conn) *Conn { + cn := &Conn{ + netConn: netConn, + createdAt: time.Now(), + } + cn.rd = proto.NewReader(netConn) + cn.bw = bufio.NewWriter(netConn) + cn.wr = proto.NewWriter(cn.bw) + cn.SetUsedAt(time.Now()) + return cn +} + +func (cn *Conn) UsedAt() time.Time { + unix := atomic.LoadInt64(&cn.usedAt) + return time.Unix(unix, 0) +} + +func (cn *Conn) SetUsedAt(tm time.Time) { + atomic.StoreInt64(&cn.usedAt, tm.Unix()) +} + +func (cn *Conn) SetNetConn(netConn net.Conn) { + cn.netConn = netConn + cn.rd.Reset(netConn) + cn.bw.Reset(netConn) +} + +func (cn *Conn) Write(b []byte) (int, error) { + return cn.netConn.Write(b) +} + +func (cn *Conn) RemoteAddr() net.Addr { + if cn.netConn != nil { + return cn.netConn.RemoteAddr() + } + return nil +} + +func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error { + if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil { + return err + } + return fn(cn.rd) +} + +func (cn *Conn) WithWriter( + ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error, +) error { + if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil { + return err + } + + if cn.bw.Buffered() > 0 { + cn.bw.Reset(cn.netConn) + } + + if err := fn(cn.wr); err != nil { + return err + } + + return cn.bw.Flush() +} + +func (cn *Conn) Close() error { + return cn.netConn.Close() +} + +func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time { + tm := time.Now() + cn.SetUsedAt(tm) + + if timeout > 0 { + tm = tm.Add(timeout) + } + + if ctx != nil { + deadline, ok := ctx.Deadline() + if ok { + if timeout == 0 { + return deadline + } + if 
deadline.Before(tm) {
+ return deadline
+ }
+ return tm
+ }
+ }
+
+ if timeout > 0 {
+ return tm
+ }
+
+ return noDeadline
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
new file mode 100644
index 0000000000..44a4e779df
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -0,0 +1,557 @@
+package pool
+
+import (
+ "context"
+ "errors"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/v8/internal"
+)
+
+var (
+ // ErrClosed is returned when any operation is performed on a client that has been closed.
+ ErrClosed = errors.New("redis: client is closed")
+
+ // ErrPoolTimeout is returned when the pool times out waiting for a free connection.
+ ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+ New: func() interface{} {
+ t := time.NewTimer(time.Hour)
+ t.Stop()
+ return t
+ },
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+ Hits uint32 // number of times free connection was found in the pool
+ Misses uint32 // number of times free connection was NOT found in the pool
+ Timeouts uint32 // number of times a wait timeout occurred
+
+ TotalConns uint32 // number of total connections in the pool
+ IdleConns uint32 // number of idle connections in the pool
+ StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+ NewConn(context.Context) (*Conn, error)
+ CloseConn(*Conn) error
+
+ Get(context.Context) (*Conn, error)
+ Put(context.Context, *Conn)
+ Remove(context.Context, *Conn, error)
+
+ Len() int
+ IdleLen() int
+ Stats() *Stats
+
+ Close() error
+}
+
+type Options struct {
+ Dialer func(context.Context) (net.Conn, error)
+ OnClose func(*Conn) error
+
+ PoolFIFO bool
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+}
+
+type lastDialErrorWrap struct {
+ err error
+}
+
+type ConnPool struct {
+ opt *Options
+
+ dialErrorsNum uint32 // atomic
+
+ lastDialError atomic.Value
+
+ queue chan struct{}
+
+ connsMu sync.Mutex
+ conns []*Conn
+ idleConns []*Conn
+ poolSize int
+ idleConnsLen int
+
+ stats Stats
+
+ _closed uint32 // atomic
+ closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+ p := &ConnPool{
+ opt: opt,
+
+ queue: make(chan struct{}, opt.PoolSize),
+ conns: make([]*Conn, 0, opt.PoolSize),
+ idleConns: make([]*Conn, 0, opt.PoolSize),
+ closedCh: make(chan struct{}),
+ }
+
+ p.connsMu.Lock()
+ p.checkMinIdleConns()
+ p.connsMu.Unlock()
+
+ if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+ go p.reaper(opt.IdleCheckFrequency)
+ }
+
+ return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+ if p.opt.MinIdleConns == 0 {
+ return
+ }
+ for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+ p.poolSize++
+ p.idleConnsLen++
+
+ go func() {
+ err := p.addIdleConn()
+ if err != nil && err != ErrClosed {
+ p.connsMu.Lock()
+ p.poolSize--
+ p.idleConnsLen--
+ p.connsMu.Unlock()
+ }
+ }()
+ }
+}
+
+func (p *ConnPool) addIdleConn() error {
+ cn, err := p.dialConn(context.TODO(), true)
+ if err != nil {
+ return err
+ }
+
+ p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ // It is not allowed to add new connections to the closed connection pool.
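+ // The connection was dialed before the lock was taken, so it must be
+ // closed here rather than leaked.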
+ if p.closed() { + _ = cn.Close() + return ErrClosed + } + + p.conns = append(p.conns, cn) + p.idleConns = append(p.idleConns, cn) + return nil +} + +func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) { + return p.newConn(ctx, false) +} + +func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) { + cn, err := p.dialConn(ctx, pooled) + if err != nil { + return nil, err + } + + p.connsMu.Lock() + defer p.connsMu.Unlock() + + // It is not allowed to add new connections to the closed connection pool. + if p.closed() { + _ = cn.Close() + return nil, ErrClosed + } + + p.conns = append(p.conns, cn) + if pooled { + // If pool is full remove the cn on next Put. + if p.poolSize >= p.opt.PoolSize { + cn.pooled = false + } else { + p.poolSize++ + } + } + + return cn, nil +} + +func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) { + if p.closed() { + return nil, ErrClosed + } + + if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) { + return nil, p.getLastDialError() + } + + netConn, err := p.opt.Dialer(ctx) + if err != nil { + p.setLastDialError(err) + if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) { + go p.tryDial() + } + return nil, err + } + + cn := NewConn(netConn) + cn.pooled = pooled + return cn, nil +} + +func (p *ConnPool) tryDial() { + for { + if p.closed() { + return + } + + conn, err := p.opt.Dialer(context.Background()) + if err != nil { + p.setLastDialError(err) + time.Sleep(time.Second) + continue + } + + atomic.StoreUint32(&p.dialErrorsNum, 0) + _ = conn.Close() + return + } +} + +func (p *ConnPool) setLastDialError(err error) { + p.lastDialError.Store(&lastDialErrorWrap{err: err}) +} + +func (p *ConnPool) getLastDialError() error { + err, _ := p.lastDialError.Load().(*lastDialErrorWrap) + if err != nil { + return err.err + } + return nil +} + +// Get returns existed connection from the pool or creates a new one. 
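+// It first waits for a turn in the queue (bounded by PoolSize), then pops
+// idle connections until a non-stale one is found; if none remain, a new
+// connection is dialed. A caller-side sketch, with hypothetical p and ctx:
+//
+//	cn, err := p.Get(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	// ... use cn ...
+//	p.Put(ctx, cn) // or p.Remove(ctx, cn, err) if cn went bad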
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) { + if p.closed() { + return nil, ErrClosed + } + + if err := p.waitTurn(ctx); err != nil { + return nil, err + } + + for { + p.connsMu.Lock() + cn, err := p.popIdle() + p.connsMu.Unlock() + + if err != nil { + return nil, err + } + + if cn == nil { + break + } + + if p.isStaleConn(cn) { + _ = p.CloseConn(cn) + continue + } + + atomic.AddUint32(&p.stats.Hits, 1) + return cn, nil + } + + atomic.AddUint32(&p.stats.Misses, 1) + + newcn, err := p.newConn(ctx, true) + if err != nil { + p.freeTurn() + return nil, err + } + + return newcn, nil +} + +func (p *ConnPool) getTurn() { + p.queue <- struct{}{} +} + +func (p *ConnPool) waitTurn(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + select { + case p.queue <- struct{}{}: + return nil + default: + } + + timer := timers.Get().(*time.Timer) + timer.Reset(p.opt.PoolTimeout) + + select { + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + timers.Put(timer) + return ctx.Err() + case p.queue <- struct{}{}: + if !timer.Stop() { + <-timer.C + } + timers.Put(timer) + return nil + case <-timer.C: + timers.Put(timer) + atomic.AddUint32(&p.stats.Timeouts, 1) + return ErrPoolTimeout + } +} + +func (p *ConnPool) freeTurn() { + <-p.queue +} + +func (p *ConnPool) popIdle() (*Conn, error) { + if p.closed() { + return nil, ErrClosed + } + n := len(p.idleConns) + if n == 0 { + return nil, nil + } + + var cn *Conn + if p.opt.PoolFIFO { + cn = p.idleConns[0] + copy(p.idleConns, p.idleConns[1:]) + p.idleConns = p.idleConns[:n-1] + } else { + idx := n - 1 + cn = p.idleConns[idx] + p.idleConns = p.idleConns[:idx] + } + p.idleConnsLen-- + p.checkMinIdleConns() + return cn, nil +} + +func (p *ConnPool) Put(ctx context.Context, cn *Conn) { + if cn.rd.Buffered() > 0 { + internal.Logger.Printf(ctx, "Conn has unread data") + p.Remove(ctx, cn, BadConnError{}) + return + } + + if !cn.pooled { + p.Remove(ctx, cn, nil) + return + } + + p.connsMu.Lock() + p.idleConns = append(p.idleConns, cn) + p.idleConnsLen++ + p.connsMu.Unlock() + p.freeTurn() +} + +func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) { + p.removeConnWithLock(cn) + p.freeTurn() + _ = p.closeConn(cn) +} + +func (p *ConnPool) CloseConn(cn *Conn) error { + p.removeConnWithLock(cn) + return p.closeConn(cn) +} + +func (p *ConnPool) removeConnWithLock(cn *Conn) { + p.connsMu.Lock() + p.removeConn(cn) + p.connsMu.Unlock() +} + +func (p *ConnPool) removeConn(cn *Conn) { + for i, c := range p.conns { + if c == cn { + p.conns = append(p.conns[:i], p.conns[i+1:]...) + if cn.pooled { + p.poolSize-- + p.checkMinIdleConns() + } + return + } + } +} + +func (p *ConnPool) closeConn(cn *Conn) error { + if p.opt.OnClose != nil { + _ = p.opt.OnClose(cn) + } + return cn.Close() +} + +// Len returns total number of connections. +func (p *ConnPool) Len() int { + p.connsMu.Lock() + n := len(p.conns) + p.connsMu.Unlock() + return n +} + +// IdleLen returns number of idle connections. 
+func (p *ConnPool) IdleLen() int { + p.connsMu.Lock() + n := p.idleConnsLen + p.connsMu.Unlock() + return n +} + +func (p *ConnPool) Stats() *Stats { + idleLen := p.IdleLen() + return &Stats{ + Hits: atomic.LoadUint32(&p.stats.Hits), + Misses: atomic.LoadUint32(&p.stats.Misses), + Timeouts: atomic.LoadUint32(&p.stats.Timeouts), + + TotalConns: uint32(p.Len()), + IdleConns: uint32(idleLen), + StaleConns: atomic.LoadUint32(&p.stats.StaleConns), + } +} + +func (p *ConnPool) closed() bool { + return atomic.LoadUint32(&p._closed) == 1 +} + +func (p *ConnPool) Filter(fn func(*Conn) bool) error { + p.connsMu.Lock() + defer p.connsMu.Unlock() + + var firstErr error + for _, cn := range p.conns { + if fn(cn) { + if err := p.closeConn(cn); err != nil && firstErr == nil { + firstErr = err + } + } + } + return firstErr +} + +func (p *ConnPool) Close() error { + if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) { + return ErrClosed + } + close(p.closedCh) + + var firstErr error + p.connsMu.Lock() + for _, cn := range p.conns { + if err := p.closeConn(cn); err != nil && firstErr == nil { + firstErr = err + } + } + p.conns = nil + p.poolSize = 0 + p.idleConns = nil + p.idleConnsLen = 0 + p.connsMu.Unlock() + + return firstErr +} + +func (p *ConnPool) reaper(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + // It is possible that ticker and closedCh arrive together, + // and select pseudo-randomly pick ticker case, we double + // check here to prevent being executed after closed. + if p.closed() { + return + } + _, err := p.ReapStaleConns() + if err != nil { + internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err) + continue + } + case <-p.closedCh: + return + } + } +} + +func (p *ConnPool) ReapStaleConns() (int, error) { + var n int + for { + p.getTurn() + + p.connsMu.Lock() + cn := p.reapStaleConn() + p.connsMu.Unlock() + + p.freeTurn() + + if cn != nil { + _ = p.closeConn(cn) + n++ + } else { + break + } + } + atomic.AddUint32(&p.stats.StaleConns, uint32(n)) + return n, nil +} + +func (p *ConnPool) reapStaleConn() *Conn { + if len(p.idleConns) == 0 { + return nil + } + + cn := p.idleConns[0] + if !p.isStaleConn(cn) { + return nil + } + + p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...) 
+ p.idleConnsLen-- + p.removeConn(cn) + + return cn +} + +func (p *ConnPool) isStaleConn(cn *Conn) bool { + if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 { + return false + } + + now := time.Now() + if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout { + return true + } + if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge { + return true + } + + return false +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go new file mode 100644 index 0000000000..5a3fde191b --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go @@ -0,0 +1,58 @@ +package pool + +import "context" + +type SingleConnPool struct { + pool Pooler + cn *Conn + stickyErr error +} + +var _ Pooler = (*SingleConnPool)(nil) + +func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool { + return &SingleConnPool{ + pool: pool, + cn: cn, + } +} + +func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) { + return p.pool.NewConn(ctx) +} + +func (p *SingleConnPool) CloseConn(cn *Conn) error { + return p.pool.CloseConn(cn) +} + +func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) { + if p.stickyErr != nil { + return nil, p.stickyErr + } + return p.cn, nil +} + +func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {} + +func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) { + p.cn = nil + p.stickyErr = reason +} + +func (p *SingleConnPool) Close() error { + p.cn = nil + p.stickyErr = ErrClosed + return nil +} + +func (p *SingleConnPool) Len() int { + return 0 +} + +func (p *SingleConnPool) IdleLen() int { + return 0 +} + +func (p *SingleConnPool) Stats() *Stats { + return &Stats{} +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go new file mode 100644 index 0000000000..3adb99bc82 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go @@ -0,0 +1,201 @@ +package pool + +import ( + "context" + "errors" + "fmt" + "sync/atomic" +) + +const ( + stateDefault = 0 + stateInited = 1 + stateClosed = 2 +) + +type BadConnError struct { + wrapped error +} + +var _ error = (*BadConnError)(nil) + +func (e BadConnError) Error() string { + s := "redis: Conn is in a bad state" + if e.wrapped != nil { + s += ": " + e.wrapped.Error() + } + return s +} + +func (e BadConnError) Unwrap() error { + return e.wrapped +} + +//------------------------------------------------------------------------------ + +type StickyConnPool struct { + pool Pooler + shared int32 // atomic + + state uint32 // atomic + ch chan *Conn + + _badConnError atomic.Value +} + +var _ Pooler = (*StickyConnPool)(nil) + +func NewStickyConnPool(pool Pooler) *StickyConnPool { + p, ok := pool.(*StickyConnPool) + if !ok { + p = &StickyConnPool{ + pool: pool, + ch: make(chan *Conn, 1), + } + } + atomic.AddInt32(&p.shared, 1) + return p +} + +func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) { + return p.pool.NewConn(ctx) +} + +func (p *StickyConnPool) CloseConn(cn *Conn) error { + return p.pool.CloseConn(cn) +} + +func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) { + // In worst case this races with Close which is not a very common operation. 
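+ // The loop below drives a small state machine: in stateDefault a
+ // connection is borrowed from the parent pool and the state is moved
+ // to stateInited with a CAS; in stateInited the sticky connection is
+ // received from the channel once it has been Put back. The iteration
+ // cap guards against spinning forever.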
+ for i := 0; i < 1000; i++ { + switch atomic.LoadUint32(&p.state) { + case stateDefault: + cn, err := p.pool.Get(ctx) + if err != nil { + return nil, err + } + if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) { + return cn, nil + } + p.pool.Remove(ctx, cn, ErrClosed) + case stateInited: + if err := p.badConnError(); err != nil { + return nil, err + } + cn, ok := <-p.ch + if !ok { + return nil, ErrClosed + } + return cn, nil + case stateClosed: + return nil, ErrClosed + default: + panic("not reached") + } + } + return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop") +} + +func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) { + defer func() { + if recover() != nil { + p.freeConn(ctx, cn) + } + }() + p.ch <- cn +} + +func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) { + if err := p.badConnError(); err != nil { + p.pool.Remove(ctx, cn, err) + } else { + p.pool.Put(ctx, cn) + } +} + +func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) { + defer func() { + if recover() != nil { + p.pool.Remove(ctx, cn, ErrClosed) + } + }() + p._badConnError.Store(BadConnError{wrapped: reason}) + p.ch <- cn +} + +func (p *StickyConnPool) Close() error { + if shared := atomic.AddInt32(&p.shared, -1); shared > 0 { + return nil + } + + for i := 0; i < 1000; i++ { + state := atomic.LoadUint32(&p.state) + if state == stateClosed { + return ErrClosed + } + if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) { + close(p.ch) + cn, ok := <-p.ch + if ok { + p.freeConn(context.TODO(), cn) + } + return nil + } + } + + return errors.New("redis: StickyConnPool.Close: infinite loop") +} + +func (p *StickyConnPool) Reset(ctx context.Context) error { + if p.badConnError() == nil { + return nil + } + + select { + case cn, ok := <-p.ch: + if !ok { + return ErrClosed + } + p.pool.Remove(ctx, cn, ErrClosed) + p._badConnError.Store(BadConnError{wrapped: nil}) + default: + return errors.New("redis: StickyConnPool does not have a Conn") + } + + if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) { + state := atomic.LoadUint32(&p.state) + return fmt.Errorf("redis: invalid StickyConnPool state: %d", state) + } + + return nil +} + +func (p *StickyConnPool) badConnError() error { + if v := p._badConnError.Load(); v != nil { + if err := v.(BadConnError); err.wrapped != nil { + return err + } + } + return nil +} + +func (p *StickyConnPool) Len() int { + switch atomic.LoadUint32(&p.state) { + case stateDefault: + return 0 + case stateInited: + return 1 + case stateClosed: + return 0 + default: + panic("not reached") + } +} + +func (p *StickyConnPool) IdleLen() int { + return len(p.ch) +} + +func (p *StickyConnPool) Stats() *Stats { + return &Stats{} +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/ya.make b/vendor/github.com/go-redis/redis/v8/internal/pool/ya.make new file mode 100644 index 0000000000..31e095aba1 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/pool/ya.make @@ -0,0 +1,22 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS( + conn.go + pool.go + pool_single.go + pool_sticky.go +) + +GO_TEST_SRCS(export_test.go) + +GO_XTEST_SRCS( + bench_test.go + main_test.go + pool_test.go +) + +END() + +RECURSE(gotest) diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go new file mode 100644 index 0000000000..0e6ca779b1 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go @@ -0,0 +1,332 @@ +package 
proto + +import ( + "bufio" + "fmt" + "io" + + "github.com/go-redis/redis/v8/internal/util" +) + +// redis resp protocol data type. +const ( + ErrorReply = '-' + StatusReply = '+' + IntReply = ':' + StringReply = '$' + ArrayReply = '*' +) + +//------------------------------------------------------------------------------ + +const Nil = RedisError("redis: nil") // nolint:errname + +type RedisError string + +func (e RedisError) Error() string { return string(e) } + +func (RedisError) RedisError() {} + +//------------------------------------------------------------------------------ + +type MultiBulkParse func(*Reader, int64) (interface{}, error) + +type Reader struct { + rd *bufio.Reader + _buf []byte +} + +func NewReader(rd io.Reader) *Reader { + return &Reader{ + rd: bufio.NewReader(rd), + _buf: make([]byte, 64), + } +} + +func (r *Reader) Buffered() int { + return r.rd.Buffered() +} + +func (r *Reader) Peek(n int) ([]byte, error) { + return r.rd.Peek(n) +} + +func (r *Reader) Reset(rd io.Reader) { + r.rd.Reset(rd) +} + +func (r *Reader) ReadLine() ([]byte, error) { + line, err := r.readLine() + if err != nil { + return nil, err + } + if isNilReply(line) { + return nil, Nil + } + return line, nil +} + +// readLine that returns an error if: +// - there is a pending read error; +// - or line does not end with \r\n. +func (r *Reader) readLine() ([]byte, error) { + b, err := r.rd.ReadSlice('\n') + if err != nil { + if err != bufio.ErrBufferFull { + return nil, err + } + + full := make([]byte, len(b)) + copy(full, b) + + b, err = r.rd.ReadBytes('\n') + if err != nil { + return nil, err + } + + full = append(full, b...) //nolint:makezero + b = full + } + if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' { + return nil, fmt.Errorf("redis: invalid reply: %q", b) + } + return b[:len(b)-2], nil +} + +func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) { + line, err := r.ReadLine() + if err != nil { + return nil, err + } + + switch line[0] { + case ErrorReply: + return nil, ParseErrorReply(line) + case StatusReply: + return string(line[1:]), nil + case IntReply: + return util.ParseInt(line[1:], 10, 64) + case StringReply: + return r.readStringReply(line) + case ArrayReply: + n, err := parseArrayLen(line) + if err != nil { + return nil, err + } + if m == nil { + err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line) + return nil, err + } + return m(r, n) + } + return nil, fmt.Errorf("redis: can't parse %.100q", line) +} + +func (r *Reader) ReadIntReply() (int64, error) { + line, err := r.ReadLine() + if err != nil { + return 0, err + } + switch line[0] { + case ErrorReply: + return 0, ParseErrorReply(line) + case IntReply: + return util.ParseInt(line[1:], 10, 64) + default: + return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line) + } +} + +func (r *Reader) ReadString() (string, error) { + line, err := r.ReadLine() + if err != nil { + return "", err + } + switch line[0] { + case ErrorReply: + return "", ParseErrorReply(line) + case StringReply: + return r.readStringReply(line) + case StatusReply: + return string(line[1:]), nil + case IntReply: + return string(line[1:]), nil + default: + return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line) + } +} + +func (r *Reader) readStringReply(line []byte) (string, error) { + if isNilReply(line) { + return "", Nil + } + + replyLen, err := util.Atoi(line[1:]) + if err != nil { + return "", err + } + + b := make([]byte, replyLen+2) + _, err = io.ReadFull(r.rd, b) + if err != nil { + 
return "", err + } + + return util.BytesToString(b[:replyLen]), nil +} + +func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) { + line, err := r.ReadLine() + if err != nil { + return nil, err + } + switch line[0] { + case ErrorReply: + return nil, ParseErrorReply(line) + case ArrayReply: + n, err := parseArrayLen(line) + if err != nil { + return nil, err + } + return m(r, n) + default: + return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line) + } +} + +func (r *Reader) ReadArrayLen() (int, error) { + line, err := r.ReadLine() + if err != nil { + return 0, err + } + switch line[0] { + case ErrorReply: + return 0, ParseErrorReply(line) + case ArrayReply: + n, err := parseArrayLen(line) + if err != nil { + return 0, err + } + return int(n), nil + default: + return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line) + } +} + +func (r *Reader) ReadScanReply() ([]string, uint64, error) { + n, err := r.ReadArrayLen() + if err != nil { + return nil, 0, err + } + if n != 2 { + return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) + } + + cursor, err := r.ReadUint() + if err != nil { + return nil, 0, err + } + + n, err = r.ReadArrayLen() + if err != nil { + return nil, 0, err + } + + keys := make([]string, n) + + for i := 0; i < n; i++ { + key, err := r.ReadString() + if err != nil { + return nil, 0, err + } + keys[i] = key + } + + return keys, cursor, err +} + +func (r *Reader) ReadInt() (int64, error) { + b, err := r.readTmpBytesReply() + if err != nil { + return 0, err + } + return util.ParseInt(b, 10, 64) +} + +func (r *Reader) ReadUint() (uint64, error) { + b, err := r.readTmpBytesReply() + if err != nil { + return 0, err + } + return util.ParseUint(b, 10, 64) +} + +func (r *Reader) ReadFloatReply() (float64, error) { + b, err := r.readTmpBytesReply() + if err != nil { + return 0, err + } + return util.ParseFloat(b, 64) +} + +func (r *Reader) readTmpBytesReply() ([]byte, error) { + line, err := r.ReadLine() + if err != nil { + return nil, err + } + switch line[0] { + case ErrorReply: + return nil, ParseErrorReply(line) + case StringReply: + return r._readTmpBytesReply(line) + case StatusReply: + return line[1:], nil + default: + return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line) + } +} + +func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) { + if isNilReply(line) { + return nil, Nil + } + + replyLen, err := util.Atoi(line[1:]) + if err != nil { + return nil, err + } + + buf := r.buf(replyLen + 2) + _, err = io.ReadFull(r.rd, buf) + if err != nil { + return nil, err + } + + return buf[:replyLen], nil +} + +func (r *Reader) buf(n int) []byte { + if n <= cap(r._buf) { + return r._buf[:n] + } + d := n - cap(r._buf) + r._buf = append(r._buf, make([]byte, d)...) 
+ return r._buf +} + +func isNilReply(b []byte) bool { + return len(b) == 3 && + (b[0] == StringReply || b[0] == ArrayReply) && + b[1] == '-' && b[2] == '1' +} + +func ParseErrorReply(line []byte) error { + return RedisError(string(line[1:])) +} + +func parseArrayLen(line []byte) (int64, error) { + if isNilReply(line) { + return 0, Nil + } + return util.ParseInt(line[1:], 10, 64) +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go new file mode 100644 index 0000000000..0e994765fe --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go @@ -0,0 +1,180 @@ +package proto + +import ( + "encoding" + "fmt" + "reflect" + "time" + + "github.com/go-redis/redis/v8/internal/util" +) + +// Scan parses bytes `b` to `v` with appropriate type. +//nolint:gocyclo +func Scan(b []byte, v interface{}) error { + switch v := v.(type) { + case nil: + return fmt.Errorf("redis: Scan(nil)") + case *string: + *v = util.BytesToString(b) + return nil + case *[]byte: + *v = b + return nil + case *int: + var err error + *v, err = util.Atoi(b) + return err + case *int8: + n, err := util.ParseInt(b, 10, 8) + if err != nil { + return err + } + *v = int8(n) + return nil + case *int16: + n, err := util.ParseInt(b, 10, 16) + if err != nil { + return err + } + *v = int16(n) + return nil + case *int32: + n, err := util.ParseInt(b, 10, 32) + if err != nil { + return err + } + *v = int32(n) + return nil + case *int64: + n, err := util.ParseInt(b, 10, 64) + if err != nil { + return err + } + *v = n + return nil + case *uint: + n, err := util.ParseUint(b, 10, 64) + if err != nil { + return err + } + *v = uint(n) + return nil + case *uint8: + n, err := util.ParseUint(b, 10, 8) + if err != nil { + return err + } + *v = uint8(n) + return nil + case *uint16: + n, err := util.ParseUint(b, 10, 16) + if err != nil { + return err + } + *v = uint16(n) + return nil + case *uint32: + n, err := util.ParseUint(b, 10, 32) + if err != nil { + return err + } + *v = uint32(n) + return nil + case *uint64: + n, err := util.ParseUint(b, 10, 64) + if err != nil { + return err + } + *v = n + return nil + case *float32: + n, err := util.ParseFloat(b, 32) + if err != nil { + return err + } + *v = float32(n) + return err + case *float64: + var err error + *v, err = util.ParseFloat(b, 64) + return err + case *bool: + *v = len(b) == 1 && b[0] == '1' + return nil + case *time.Time: + var err error + *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b)) + return err + case *time.Duration: + n, err := util.ParseInt(b, 10, 64) + if err != nil { + return err + } + *v = time.Duration(n) + return nil + case encoding.BinaryUnmarshaler: + return v.UnmarshalBinary(b) + default: + return fmt.Errorf( + "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v) + } +} + +func ScanSlice(data []string, slice interface{}) error { + v := reflect.ValueOf(slice) + if !v.IsValid() { + return fmt.Errorf("redis: ScanSlice(nil)") + } + if v.Kind() != reflect.Ptr { + return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice) + } + v = v.Elem() + if v.Kind() != reflect.Slice { + return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice) + } + + next := makeSliceNextElemFunc(v) + for i, s := range data { + elem := next() + if err := Scan([]byte(s), elem.Addr().Interface()); err != nil { + err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err) + return err + } + } + + return nil +} + +func makeSliceNextElemFunc(v reflect.Value) func() 
reflect.Value { + elemType := v.Type().Elem() + + if elemType.Kind() == reflect.Ptr { + elemType = elemType.Elem() + return func() reflect.Value { + if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Len()+1)) + elem := v.Index(v.Len() - 1) + if elem.IsNil() { + elem.Set(reflect.New(elemType)) + } + return elem.Elem() + } + + elem := reflect.New(elemType) + v.Set(reflect.Append(v, elem)) + return elem.Elem() + } + } + + zero := reflect.Zero(elemType) + return func() reflect.Value { + if v.Len() < v.Cap() { + v.Set(v.Slice(0, v.Len()+1)) + return v.Index(v.Len() - 1) + } + + v.Set(reflect.Append(v, zero)) + return v.Index(v.Len() - 1) + } +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go new file mode 100644 index 0000000000..c4260981ed --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go @@ -0,0 +1,155 @@ +package proto + +import ( + "encoding" + "fmt" + "io" + "strconv" + "time" + + "github.com/go-redis/redis/v8/internal/util" +) + +type writer interface { + io.Writer + io.ByteWriter + // io.StringWriter + WriteString(s string) (n int, err error) +} + +type Writer struct { + writer + + lenBuf []byte + numBuf []byte +} + +func NewWriter(wr writer) *Writer { + return &Writer{ + writer: wr, + + lenBuf: make([]byte, 64), + numBuf: make([]byte, 64), + } +} + +func (w *Writer) WriteArgs(args []interface{}) error { + if err := w.WriteByte(ArrayReply); err != nil { + return err + } + + if err := w.writeLen(len(args)); err != nil { + return err + } + + for _, arg := range args { + if err := w.WriteArg(arg); err != nil { + return err + } + } + + return nil +} + +func (w *Writer) writeLen(n int) error { + w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10) + w.lenBuf = append(w.lenBuf, '\r', '\n') + _, err := w.Write(w.lenBuf) + return err +} + +func (w *Writer) WriteArg(v interface{}) error { + switch v := v.(type) { + case nil: + return w.string("") + case string: + return w.string(v) + case []byte: + return w.bytes(v) + case int: + return w.int(int64(v)) + case int8: + return w.int(int64(v)) + case int16: + return w.int(int64(v)) + case int32: + return w.int(int64(v)) + case int64: + return w.int(v) + case uint: + return w.uint(uint64(v)) + case uint8: + return w.uint(uint64(v)) + case uint16: + return w.uint(uint64(v)) + case uint32: + return w.uint(uint64(v)) + case uint64: + return w.uint(v) + case float32: + return w.float(float64(v)) + case float64: + return w.float(v) + case bool: + if v { + return w.int(1) + } + return w.int(0) + case time.Time: + w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano) + return w.bytes(w.numBuf) + case time.Duration: + return w.int(v.Nanoseconds()) + case encoding.BinaryMarshaler: + b, err := v.MarshalBinary() + if err != nil { + return err + } + return w.bytes(b) + default: + return fmt.Errorf( + "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v) + } +} + +func (w *Writer) bytes(b []byte) error { + if err := w.WriteByte(StringReply); err != nil { + return err + } + + if err := w.writeLen(len(b)); err != nil { + return err + } + + if _, err := w.Write(b); err != nil { + return err + } + + return w.crlf() +} + +func (w *Writer) string(s string) error { + return w.bytes(util.StringToBytes(s)) +} + +func (w *Writer) uint(n uint64) error { + w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10) + return w.bytes(w.numBuf) +} + +func (w *Writer) int(n int64) error { + w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10) + return 
w.bytes(w.numBuf) +} + +func (w *Writer) float(f float64) error { + w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64) + return w.bytes(w.numBuf) +} + +func (w *Writer) crlf() error { + if err := w.WriteByte('\r'); err != nil { + return err + } + return w.WriteByte('\n') +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/ya.make b/vendor/github.com/go-redis/redis/v8/internal/proto/ya.make new file mode 100644 index 0000000000..6ba8a5f13e --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/proto/ya.make @@ -0,0 +1,20 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS( + reader.go + scan.go + writer.go +) + +GO_XTEST_SRCS( + proto_test.go + reader_test.go + scan_test.go + writer_test.go +) + +END() + +RECURSE(gotest) diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go new file mode 100644 index 0000000000..2edccba94f --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go @@ -0,0 +1,50 @@ +package rand + +import ( + "math/rand" + "sync" +) + +// Int returns a non-negative pseudo-random int. +func Int() int { return pseudo.Int() } + +// Intn returns, as an int, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func Intn(n int) int { return pseudo.Intn(n) } + +// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n). +// It panics if n <= 0. +func Int63n(n int64) int64 { return pseudo.Int63n(n) } + +// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n). +func Perm(n int) []int { return pseudo.Perm(n) } + +// Seed uses the provided seed value to initialize the default Source to a +// deterministic state. If Seed is not called, the generator behaves as if +// seeded by Seed(1). +func Seed(n int64) { pseudo.Seed(n) } + +var pseudo = rand.New(&source{src: rand.NewSource(1)}) + +type source struct { + src rand.Source + mu sync.Mutex +} + +func (s *source) Int63() int64 { + s.mu.Lock() + n := s.src.Int63() + s.mu.Unlock() + return n +} + +func (s *source) Seed(seed int64) { + s.mu.Lock() + s.src.Seed(seed) + s.mu.Unlock() +} + +// Shuffle pseudo-randomizes the order of elements. +// n is the number of elements. +// swap swaps the elements with indexes i and j. +func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) } diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/ya.make b/vendor/github.com/go-redis/redis/v8/internal/rand/ya.make new file mode 100644 index 0000000000..eb2f7feb15 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/rand/ya.make @@ -0,0 +1,7 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS(rand.go) + +END() diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go new file mode 100644 index 0000000000..9f2e418f79 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go @@ -0,0 +1,21 @@ +//go:build !appengine +// +build !appengine + +package internal + +import "unsafe" + +// String converts byte slice to string. +func String(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// Bytes converts string to byte slice. 
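+// The returned slice shares the string's backing array and therefore must
+// not be modified: Go strings are immutable, and writing through the slice
+// is undefined behavior.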
+func Bytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go new file mode 100644 index 0000000000..e34a7f0326 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/util.go @@ -0,0 +1,46 @@ +package internal + +import ( + "context" + "time" + + "github.com/go-redis/redis/v8/internal/util" +) + +func Sleep(ctx context.Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func ToLower(s string) string { + if isLower(s) { + return s + } + + b := make([]byte, len(s)) + for i := range b { + c := s[i] + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + b[i] = c + } + return util.BytesToString(b) +} + +func isLower(s string) bool { + for i := 0; i < len(s); i++ { + c := s[i] + if c >= 'A' && c <= 'Z' { + return false + } + } + return true +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go new file mode 100644 index 0000000000..db5033802a --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go @@ -0,0 +1,19 @@ +package util + +import "strconv" + +func Atoi(b []byte) (int, error) { + return strconv.Atoi(BytesToString(b)) +} + +func ParseInt(b []byte, base int, bitSize int) (int64, error) { + return strconv.ParseInt(BytesToString(b), base, bitSize) +} + +func ParseUint(b []byte, base int, bitSize int) (uint64, error) { + return strconv.ParseUint(BytesToString(b), base, bitSize) +} + +func ParseFloat(b []byte, bitSize int) (float64, error) { + return strconv.ParseFloat(BytesToString(b), bitSize) +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go new file mode 100644 index 0000000000..daa8d7692a --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go @@ -0,0 +1,23 @@ +//go:build !appengine +// +build !appengine + +package util + +import ( + "unsafe" +) + +// BytesToString converts byte slice to string. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// StringToBytes converts string to byte slice. 
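+// Like BytesToString, this is a zero-copy conversion: the slice aliases the
+// string's memory and must be treated as read-only. proto.Writer uses it to
+// write string arguments without an extra allocation.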
+func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/ya.make b/vendor/github.com/go-redis/redis/v8/internal/util/ya.make new file mode 100644 index 0000000000..3497e3cf34 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/util/ya.make @@ -0,0 +1,10 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS( + strconv.go + unsafe.go +) + +END() diff --git a/vendor/github.com/go-redis/redis/v8/internal/ya.make b/vendor/github.com/go-redis/redis/v8/internal/ya.make new file mode 100644 index 0000000000..1cf123fac4 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/internal/ya.make @@ -0,0 +1,26 @@ +GO_LIBRARY() + +LICENSE(BSD-2-Clause) + +SRCS( + arg.go + internal.go + log.go + once.go + unsafe.go + util.go +) + +GO_TEST_SRCS(internal_test.go) + +END() + +RECURSE( + gotest + hashtag + hscan + pool + proto + rand + util +) diff --git a/vendor/github.com/go-redis/redis/v8/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go new file mode 100644 index 0000000000..2f8bc2beda --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/iterator.go @@ -0,0 +1,77 @@ +package redis + +import ( + "context" + "sync" +) + +// ScanIterator is used to incrementally iterate over a collection of elements. +// It's safe for concurrent use by multiple goroutines. +type ScanIterator struct { + mu sync.Mutex // protects Scanner and pos + cmd *ScanCmd + pos int +} + +// Err returns the last iterator error, if any. +func (it *ScanIterator) Err() error { + it.mu.Lock() + err := it.cmd.Err() + it.mu.Unlock() + return err +} + +// Next advances the cursor and returns true if more values can be read. +func (it *ScanIterator) Next(ctx context.Context) bool { + it.mu.Lock() + defer it.mu.Unlock() + + // Instantly return on errors. + if it.cmd.Err() != nil { + return false + } + + // Advance cursor, check if we are still within range. + if it.pos < len(it.cmd.page) { + it.pos++ + return true + } + + for { + // Return if there is no more data to fetch. + if it.cmd.cursor == 0 { + return false + } + + // Fetch next page. + switch it.cmd.args[0] { + case "scan", "qscan": + it.cmd.args[1] = it.cmd.cursor + default: + it.cmd.args[2] = it.cmd.cursor + } + + err := it.cmd.process(ctx, it.cmd) + if err != nil { + return false + } + + it.pos = 1 + + // Redis can occasionally return empty page. + if len(it.cmd.page) > 0 { + return true + } + } +} + +// Val returns the key/field at the current cursor position. +func (it *ScanIterator) Val() string { + var v string + it.mu.Lock() + if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) { + v = it.cmd.page[it.pos-1] + } + it.mu.Unlock() + return v +} diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go new file mode 100644 index 0000000000..a4abe32c3a --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/options.go @@ -0,0 +1,429 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "net/url" + "runtime" + "sort" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/v8/internal/pool" +) + +// Limiter is the interface of a rate limiter or a circuit breaker. +type Limiter interface { + // Allow returns nil if operation is allowed or an error otherwise. + // If operation is allowed client must ReportResult of the operation + // whether it is a success or a failure. 
+ Allow() error + // ReportResult reports the result of the previously allowed operation. + // nil indicates a success, non-nil error usually indicates a failure. + ReportResult(result error) +} + +// Options keeps the settings to setup redis connection. +type Options struct { + // The network type, either tcp or unix. + // Default is tcp. + Network string + // host:port address. + Addr string + + // Dialer creates new network connection and has priority over + // Network and Addr options. + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + + // Hook that is called when new connection is established. + OnConnect func(ctx context.Context, cn *Conn) error + + // Use the specified Username to authenticate the current connection + // with one of the connections defined in the ACL list when connecting + // to a Redis 6.0 instance, or greater, that is using the Redis ACL system. + Username string + // Optional password. Must match the password specified in the + // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower), + // or the User Password when connecting to a Redis 6.0 instance, or greater, + // that is using the Redis ACL system. + Password string + + // Database to be selected after connecting to the server. + DB int + + // Maximum number of retries before giving up. + // Default is 3 retries; -1 (not 0) disables retries. + MaxRetries int + // Minimum backoff between each retry. + // Default is 8 milliseconds; -1 disables backoff. + MinRetryBackoff time.Duration + // Maximum backoff between each retry. + // Default is 512 milliseconds; -1 disables backoff. + MaxRetryBackoff time.Duration + + // Dial timeout for establishing new connections. + // Default is 5 seconds. + DialTimeout time.Duration + // Timeout for socket reads. If reached, commands will fail + // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default. + // Default is 3 seconds. + ReadTimeout time.Duration + // Timeout for socket writes. If reached, commands will fail + // with a timeout instead of blocking. + // Default is ReadTimeout. + WriteTimeout time.Duration + + // Type of connection pool. + // true for FIFO pool, false for LIFO pool. + // Note that fifo has higher overhead compared to lifo. + PoolFIFO bool + // Maximum number of socket connections. + // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS. + PoolSize int + // Minimum number of idle connections which is useful when establishing + // new connection is slow. + MinIdleConns int + // Connection age at which client retires (closes) the connection. + // Default is to not close aged connections. + MaxConnAge time.Duration + // Amount of time client waits for connection if all connections + // are busy before returning an error. + // Default is ReadTimeout + 1 second. + PoolTimeout time.Duration + // Amount of time after which client closes idle connections. + // Should be less than server's timeout. + // Default is 5 minutes. -1 disables idle timeout check. + IdleTimeout time.Duration + // Frequency of idle checks made by idle connections reaper. + // Default is 1 minute. -1 disables idle connections reaper, + // but idle connections are still discarded by the client + // if IdleTimeout is set. + IdleCheckFrequency time.Duration + + // Enables read only queries on slave nodes. + readOnly bool + + // TLS Config to use. When set TLS will be negotiated. 
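+ // For example, ParseURL sets &tls.Config{ServerName: host} for
+ // rediss:// URLs; a minimal manual configuration (with a hypothetical
+ // host name) would be &tls.Config{ServerName: "redis.example.com"}.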
+ TLSConfig *tls.Config + + // Limiter interface used to implemented circuit breaker or rate limiter. + Limiter Limiter +} + +func (opt *Options) init() { + if opt.Addr == "" { + opt.Addr = "localhost:6379" + } + if opt.Network == "" { + if strings.HasPrefix(opt.Addr, "/") { + opt.Network = "unix" + } else { + opt.Network = "tcp" + } + } + if opt.DialTimeout == 0 { + opt.DialTimeout = 5 * time.Second + } + if opt.Dialer == nil { + opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) { + netDialer := &net.Dialer{ + Timeout: opt.DialTimeout, + KeepAlive: 5 * time.Minute, + } + if opt.TLSConfig == nil { + return netDialer.DialContext(ctx, network, addr) + } + return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig) + } + } + if opt.PoolSize == 0 { + opt.PoolSize = 10 * runtime.GOMAXPROCS(0) + } + switch opt.ReadTimeout { + case -1: + opt.ReadTimeout = 0 + case 0: + opt.ReadTimeout = 3 * time.Second + } + switch opt.WriteTimeout { + case -1: + opt.WriteTimeout = 0 + case 0: + opt.WriteTimeout = opt.ReadTimeout + } + if opt.PoolTimeout == 0 { + opt.PoolTimeout = opt.ReadTimeout + time.Second + } + if opt.IdleTimeout == 0 { + opt.IdleTimeout = 5 * time.Minute + } + if opt.IdleCheckFrequency == 0 { + opt.IdleCheckFrequency = time.Minute + } + + if opt.MaxRetries == -1 { + opt.MaxRetries = 0 + } else if opt.MaxRetries == 0 { + opt.MaxRetries = 3 + } + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *Options) clone() *Options { + clone := *opt + return &clone +} + +// ParseURL parses an URL into Options that can be used to connect to Redis. +// Scheme is required. +// There are two connection types: by tcp socket and by unix socket. +// Tcp connection: +// redis://<user>:<password>@<host>:<port>/<db_number> +// Unix connection: +// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number> +// Most Option fields can be set using query parameters, with the following restrictions: +// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries +// - only scalar type fields are supported (bool, int, time.Duration) +// - for time.Duration fields, values must be a valid input for time.ParseDuration(); +// additionally a plain integer as value (i.e. 
without unit) is intepreted as seconds +// - to disable a duration field, use value less than or equal to 0; to use the default +// value, leave the value blank or remove the parameter +// - only the last value is interpreted if a parameter is given multiple times +// - fields "network", "addr", "username" and "password" can only be set using other +// URL attributes (scheme, host, userinfo, resp.), query paremeters using these +// names will be treated as unknown parameters +// - unknown parameter names will result in an error +// Examples: +// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2 +// is equivalent to: +// &Options{ +// Network: "tcp", +// Addr: "localhost:6789", +// DB: 1, // path "/3" was overridden by "&db=1" +// DialTimeout: 3 * time.Second, // no time unit = seconds +// ReadTimeout: 6 * time.Second, +// MaxRetries: 2, +// } +func ParseURL(redisURL string) (*Options, error) { + u, err := url.Parse(redisURL) + if err != nil { + return nil, err + } + + switch u.Scheme { + case "redis", "rediss": + return setupTCPConn(u) + case "unix": + return setupUnixConn(u) + default: + return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme) + } +} + +func setupTCPConn(u *url.URL) (*Options, error) { + o := &Options{Network: "tcp"} + + o.Username, o.Password = getUserPassword(u) + + h, p, err := net.SplitHostPort(u.Host) + if err != nil { + h = u.Host + } + if h == "" { + h = "localhost" + } + if p == "" { + p = "6379" + } + o.Addr = net.JoinHostPort(h, p) + + f := strings.FieldsFunc(u.Path, func(r rune) bool { + return r == '/' + }) + switch len(f) { + case 0: + o.DB = 0 + case 1: + if o.DB, err = strconv.Atoi(f[0]); err != nil { + return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) + } + default: + return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) + } + + if u.Scheme == "rediss" { + o.TLSConfig = &tls.Config{ServerName: h} + } + + return setupConnParams(u, o) +} + +func setupUnixConn(u *url.URL) (*Options, error) { + o := &Options{ + Network: "unix", + } + + if strings.TrimSpace(u.Path) == "" { // path is required with unix connection + return nil, errors.New("redis: empty unix socket path") + } + o.Addr = u.Path + o.Username, o.Password = getUserPassword(u) + return setupConnParams(u, o) +} + +type queryOptions struct { + q url.Values + err error +} + +func (o *queryOptions) string(name string) string { + vs := o.q[name] + if len(vs) == 0 { + return "" + } + delete(o.q, name) // enable detection of unknown parameters + return vs[len(vs)-1] +} + +func (o *queryOptions) int(name string) int { + s := o.string(name) + if s == "" { + return 0 + } + i, err := strconv.Atoi(s) + if err == nil { + return i + } + if o.err == nil { + o.err = fmt.Errorf("redis: invalid %s number: %s", name, err) + } + return 0 +} + +func (o *queryOptions) duration(name string) time.Duration { + s := o.string(name) + if s == "" { + return 0 + } + // try plain number first + if i, err := strconv.Atoi(s); err == nil { + if i <= 0 { + // disable timeouts + return -1 + } + return time.Duration(i) * time.Second + } + dur, err := time.ParseDuration(s) + if err == nil { + return dur + } + if o.err == nil { + o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err) + } + return 0 +} + +func (o *queryOptions) bool(name string) bool { + switch s := o.string(name); s { + case "true", "1": + return true + case "false", "0", "": + return false + default: + if o.err == nil { + o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 
or an empty string, got %q", name, s)
+ }
+ return false
+ }
+}
+
+func (o *queryOptions) remaining() []string {
+ if len(o.q) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(o.q))
+ for k := range o.q {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+ q := queryOptions{q: u.Query()}
+
+ // compat: a future major release may use q.int("db")
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
+ }
+
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxConnAge = q.duration("max_conn_age")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.IdleTimeout = q.duration("idle_timeout")
+ o.IdleCheckFrequency = q.duration("idle_check_frequency")
+ if q.err != nil {
+ return nil, q.err
+ }
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
+
+ return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+ var user, password string
+ if u.User != nil {
+ user = u.User.Username()
+ if p, ok := u.User.Password(); ok {
+ password = p
+ }
+ }
+ return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+ return pool.NewConnPool(&pool.Options{
+ Dialer: func(ctx context.Context) (net.Conn, error) {
+ return opt.Dialer(ctx, opt.Network, opt.Addr)
+ },
+ PoolFIFO: opt.PoolFIFO,
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: opt.IdleCheckFrequency,
+ })
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
new file mode 100644
index 0000000000..31bab971e6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -0,0 +1,147 @@
+package redis
+
+import (
+ "context"
+ "sync"
+
+ "github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique to speed up processing significantly by packing
+// operations into batches, sending them to Redis at once, and reading the
+// replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that Pipeline is not a transaction, so you can get unexpected
+// results with big pipelines and small read/write timeouts.
+// The Redis client has retransmission logic for timeouts: a pipeline may be
+// retransmitted, and its commands may be executed more than once.
+// To avoid this, use read/write timeouts that are reasonably large for your
+// batch size, and/or use TxPipeline.
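+//
+// A usage sketch, assuming a hypothetical client rdb and context ctx:
+//
+//	pipe := rdb.Pipeline()
+//	incr := pipe.Incr(ctx, "counter")
+//	pipe.Expire(ctx, "counter", time.Hour)
+//	_, err := pipe.Exec(ctx)
+//	// incr.Val() is only populated after Exec returns.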
+type Pipeliner interface { + StatefulCmdable + Len() int + Do(ctx context.Context, args ...interface{}) *Cmd + Process(ctx context.Context, cmd Cmder) error + Close() error + Discard() error + Exec(ctx context.Context) ([]Cmder, error) +} + +var _ Pipeliner = (*Pipeline)(nil) + +// Pipeline implements pipelining as described in +// http://redis.io/topics/pipelining. It's safe for concurrent use +// by multiple goroutines. +type Pipeline struct { + cmdable + statefulCmdable + + ctx context.Context + exec pipelineExecer + + mu sync.Mutex + cmds []Cmder + closed bool +} + +func (c *Pipeline) init() { + c.cmdable = c.Process + c.statefulCmdable = c.Process +} + +// Len returns the number of queued commands. +func (c *Pipeline) Len() int { + c.mu.Lock() + ln := len(c.cmds) + c.mu.Unlock() + return ln +} + +// Do queues the custom command for later execution. +func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +// Process queues the cmd for later execution. +func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error { + c.mu.Lock() + c.cmds = append(c.cmds, cmd) + c.mu.Unlock() + return nil +} + +// Close closes the pipeline, releasing any open resources. +func (c *Pipeline) Close() error { + c.mu.Lock() + _ = c.discard() + c.closed = true + c.mu.Unlock() + return nil +} + +// Discard resets the pipeline and discards queued commands. +func (c *Pipeline) Discard() error { + c.mu.Lock() + err := c.discard() + c.mu.Unlock() + return err +} + +func (c *Pipeline) discard() error { + if c.closed { + return pool.ErrClosed + } + c.cmds = c.cmds[:0] + return nil +} + +// Exec executes all previously queued commands using one +// client-server roundtrip. +// +// Exec always returns list of commands and error of the first failed +// command if any. +func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil, pool.ErrClosed + } + + if len(c.cmds) == 0 { + return nil, nil + } + + cmds := c.cmds + c.cmds = nil + + return cmds, c.exec(ctx, cmds) +} + +func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + if err := fn(c); err != nil { + return nil, err + } + cmds, err := c.Exec(ctx) + _ = c.Close() + return cmds, err +} + +func (c *Pipeline) Pipeline() Pipeliner { + return c +} + +func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipelined(ctx, fn) +} + +func (c *Pipeline) TxPipeline() Pipeliner { + return c +} diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go new file mode 100644 index 0000000000..efc2354af0 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/pubsub.go @@ -0,0 +1,668 @@ +package redis + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" +) + +// PubSub implements Pub/Sub commands as described in +// http://redis.io/topics/pubsub. Message receiving is NOT safe +// for concurrent use by multiple goroutines. +// +// PubSub automatically reconnects to Redis Server and resubscribes +// to the channels in case of network errors. 
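+//
+// A minimal receive loop, as a sketch (assuming rdb is a *Client and ctx
+// is a context.Context):
+//
+//	pubsub := rdb.Subscribe(ctx, "mychannel")
+//	defer pubsub.Close()
+//	for msg := range pubsub.Channel() {
+//		fmt.Println(msg.Channel, msg.Payload)
+//	}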
+type PubSub struct { + opt *Options + + newConn func(ctx context.Context, channels []string) (*pool.Conn, error) + closeConn func(*pool.Conn) error + + mu sync.Mutex + cn *pool.Conn + channels map[string]struct{} + patterns map[string]struct{} + + closed bool + exit chan struct{} + + cmd *Cmd + + chOnce sync.Once + msgCh *channel + allCh *channel +} + +func (c *PubSub) init() { + c.exit = make(chan struct{}) +} + +func (c *PubSub) String() string { + channels := mapKeys(c.channels) + channels = append(channels, mapKeys(c.patterns)...) + return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", ")) +} + +func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) { + c.mu.Lock() + cn, err := c.conn(ctx, nil) + c.mu.Unlock() + return cn, err +} + +func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) { + if c.closed { + return nil, pool.ErrClosed + } + if c.cn != nil { + return c.cn, nil + } + + channels := mapKeys(c.channels) + channels = append(channels, newChannels...) + + cn, err := c.newConn(ctx, channels) + if err != nil { + return nil, err + } + + if err := c.resubscribe(ctx, cn); err != nil { + _ = c.closeConn(cn) + return nil, err + } + + c.cn = cn + return cn, nil +} + +func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error { + return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmd) + }) +} + +func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error { + var firstErr error + + if len(c.channels) > 0 { + firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels)) + } + + if len(c.patterns) > 0 { + err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns)) + if err != nil && firstErr == nil { + firstErr = err + } + } + + return firstErr +} + +func mapKeys(m map[string]struct{}) []string { + s := make([]string, len(m)) + i := 0 + for k := range m { + s[i] = k + i++ + } + return s +} + +func (c *PubSub) _subscribe( + ctx context.Context, cn *pool.Conn, redisCmd string, channels []string, +) error { + args := make([]interface{}, 0, 1+len(channels)) + args = append(args, redisCmd) + for _, channel := range channels { + args = append(args, channel) + } + cmd := NewSliceCmd(ctx, args...) + return c.writeCmd(ctx, cn, cmd) +} + +func (c *PubSub) releaseConnWithLock( + ctx context.Context, + cn *pool.Conn, + err error, + allowTimeout bool, +) { + c.mu.Lock() + c.releaseConn(ctx, cn, err, allowTimeout) + c.mu.Unlock() +} + +func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) { + if c.cn != cn { + return + } + if isBadConn(err, allowTimeout, c.opt.Addr) { + c.reconnect(ctx, err) + } +} + +func (c *PubSub) reconnect(ctx context.Context, reason error) { + _ = c.closeTheCn(reason) + _, _ = c.conn(ctx, nil) +} + +func (c *PubSub) closeTheCn(reason error) error { + if c.cn == nil { + return nil + } + if !c.closed { + internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason) + } + err := c.closeConn(c.cn) + c.cn = nil + return err +} + +func (c *PubSub) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return pool.ErrClosed + } + c.closed = true + close(c.exit) + + return c.closeTheCn(pool.ErrClosed) +} + +// Subscribe the client to the specified channels. It returns +// empty subscription if there are no channels. 
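+//
+// For example (a sketch; the channel names are placeholders):
+//
+//	err := pubsub.Subscribe(ctx, "news", "alerts")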
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + err := c.subscribe(ctx, "subscribe", channels...) + if c.channels == nil { + c.channels = make(map[string]struct{}) + } + for _, s := range channels { + c.channels[s] = struct{}{} + } + return err +} + +// PSubscribe the client to the given patterns. It returns +// empty subscription if there are no patterns. +func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + err := c.subscribe(ctx, "psubscribe", patterns...) + if c.patterns == nil { + c.patterns = make(map[string]struct{}) + } + for _, s := range patterns { + c.patterns[s] = struct{}{} + } + return err +} + +// Unsubscribe the client from the given channels, or from all of +// them if none is given. +func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, channel := range channels { + delete(c.channels, channel) + } + err := c.subscribe(ctx, "unsubscribe", channels...) + return err +} + +// PUnsubscribe the client from the given patterns, or from all of +// them if none is given. +func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error { + c.mu.Lock() + defer c.mu.Unlock() + + for _, pattern := range patterns { + delete(c.patterns, pattern) + } + err := c.subscribe(ctx, "punsubscribe", patterns...) + return err +} + +func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error { + cn, err := c.conn(ctx, channels) + if err != nil { + return err + } + + err = c._subscribe(ctx, cn, redisCmd, channels) + c.releaseConn(ctx, cn, err, false) + return err +} + +func (c *PubSub) Ping(ctx context.Context, payload ...string) error { + args := []interface{}{"ping"} + if len(payload) == 1 { + args = append(args, payload[0]) + } + cmd := NewCmd(ctx, args...) + + c.mu.Lock() + defer c.mu.Unlock() + + cn, err := c.conn(ctx, nil) + if err != nil { + return err + } + + err = c.writeCmd(ctx, cn, cmd) + c.releaseConn(ctx, cn, err, false) + return err +} + +// Subscription received after a successful subscription to channel. +type Subscription struct { + // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe". + Kind string + // Channel name we have subscribed to. + Channel string + // Number of channels we are currently subscribed to. + Count int +} + +func (m *Subscription) String() string { + return fmt.Sprintf("%s: %s", m.Kind, m.Channel) +} + +// Message received as result of a PUBLISH command issued by another client. +type Message struct { + Channel string + Pattern string + Payload string + PayloadSlice []string +} + +func (m *Message) String() string { + return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload) +} + +// Pong received as result of a PING command issued by another client. +type Pong struct { + Payload string +} + +func (p *Pong) String() string { + if p.Payload != "" { + return fmt.Sprintf("Pong<%s>", p.Payload) + } + return "Pong" +} + +func (c *PubSub) newMessage(reply interface{}) (interface{}, error) { + switch reply := reply.(type) { + case string: + return &Pong{ + Payload: reply, + }, nil + case []interface{}: + switch kind := reply[0].(string); kind { + case "subscribe", "unsubscribe", "psubscribe", "punsubscribe": + // Can be nil in case of "unsubscribe". 
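+			// The two-value assertion leaves channel as "" instead of
+			// panicking when Redis reports a nil channel name.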
+			channel, _ := reply[1].(string)
+			return &Subscription{
+				Kind:    kind,
+				Channel: channel,
+				Count:   int(reply[2].(int64)),
+			}, nil
+		case "message":
+			switch payload := reply[2].(type) {
+			case string:
+				return &Message{
+					Channel: reply[1].(string),
+					Payload: payload,
+				}, nil
+			case []interface{}:
+				ss := make([]string, len(payload))
+				for i, s := range payload {
+					ss[i] = s.(string)
+				}
+				return &Message{
+					Channel:      reply[1].(string),
+					PayloadSlice: ss,
+				}, nil
+			default:
+				return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+			}
+		case "pmessage":
+			return &Message{
+				Pattern: reply[1].(string),
+				Channel: reply[2].(string),
+				Payload: reply[3].(string),
+			}, nil
+		case "pong":
+			return &Pong{
+				Payload: reply[1].(string),
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+		}
+	default:
+		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+	}
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+	if c.cmd == nil {
+		c.cmd = NewCmd(ctx)
+	}
+
+	// Don't hold the lock to allow subscriptions and pings.
+
+	cn, err := c.connWithLock(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+		return c.cmd.readReply(rd)
+	})
+
+	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See PubSub example for details. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+	return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or error ignoring Subscription and Pong
+// messages. This is a low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+	for {
+		msg, err := c.Receive(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		switch msg := msg.(type) {
+		case *Subscription:
+			// Ignore.
+		case *Pong:
+			// Ignore.
+		case *Message:
+			return msg, nil
+		default:
+			err := fmt.Errorf("redis: unknown message: %T", msg)
+			return nil, err
+		}
+	}
+}
+
+func (c *PubSub) getContext() context.Context {
+	if c.cmd != nil {
+		return c.cmd.ctx
+	}
+	return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// stays full for the send timeout (60 seconds by default), the message
+// is dropped.
+// The Receive* APIs can not be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping reply is not received in time.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.msgCh = newChannel(c, opts...)
+		c.msgCh.initMsgChan()
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	return c.msgCh.msgCh
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with the specified buffer size.
+// +// Deprecated: use Channel(WithChannelSize(size)), remove in v9. +func (c *PubSub) ChannelSize(size int) <-chan *Message { + return c.Channel(WithChannelSize(size)) +} + +// ChannelWithSubscriptions is like Channel, but message type can be either +// *Subscription or *Message. Subscription messages can be used to detect +// reconnections. +// +// ChannelWithSubscriptions can not be used together with Channel or ChannelSize. +func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} { + c.chOnce.Do(func() { + c.allCh = newChannel(c, WithChannelSize(size)) + c.allCh.initAllChan() + }) + if c.allCh == nil { + err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel") + panic(err) + } + return c.allCh.allCh +} + +type ChannelOption func(c *channel) + +// WithChannelSize specifies the Go chan size that is used to buffer incoming messages. +// +// The default is 100 messages. +func WithChannelSize(size int) ChannelOption { + return func(c *channel) { + c.chanSize = size + } +} + +// WithChannelHealthCheckInterval specifies the health check interval. +// PubSub will ping Redis Server if it does not receive any messages within the interval. +// To disable health check, use zero interval. +// +// The default is 3 seconds. +func WithChannelHealthCheckInterval(d time.Duration) ChannelOption { + return func(c *channel) { + c.checkInterval = d + } +} + +// WithChannelSendTimeout specifies the channel send timeout after which +// the message is dropped. +// +// The default is 60 seconds. +func WithChannelSendTimeout(d time.Duration) ChannelOption { + return func(c *channel) { + c.chanSendTimeout = d + } +} + +type channel struct { + pubSub *PubSub + + msgCh chan *Message + allCh chan interface{} + ping chan struct{} + + chanSize int + chanSendTimeout time.Duration + checkInterval time.Duration +} + +func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel { + c := &channel{ + pubSub: pubSub, + + chanSize: 100, + chanSendTimeout: time.Minute, + checkInterval: 3 * time.Second, + } + for _, opt := range opts { + opt(c) + } + if c.checkInterval > 0 { + c.initHealthCheck() + } + return c +} + +func (c *channel) initHealthCheck() { + ctx := context.TODO() + c.ping = make(chan struct{}, 1) + + go func() { + timer := time.NewTimer(time.Minute) + timer.Stop() + + for { + timer.Reset(c.checkInterval) + select { + case <-c.ping: + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + if pingErr := c.pubSub.Ping(ctx); pingErr != nil { + c.pubSub.mu.Lock() + c.pubSub.reconnect(ctx, pingErr) + c.pubSub.mu.Unlock() + } + case <-c.pubSub.exit: + return + } + } + }() +} + +// initMsgChan must be in sync with initAllChan. +func (c *channel) initMsgChan() { + ctx := context.TODO() + c.msgCh = make(chan *Message, c.chanSize) + + go func() { + timer := time.NewTimer(time.Minute) + timer.Stop() + + var errCount int + for { + msg, err := c.pubSub.Receive(ctx) + if err != nil { + if err == pool.ErrClosed { + close(c.msgCh) + return + } + if errCount > 0 { + time.Sleep(100 * time.Millisecond) + } + errCount++ + continue + } + + errCount = 0 + + // Any message is as good as a ping. + select { + case c.ping <- struct{}{}: + default: + } + + switch msg := msg.(type) { + case *Subscription: + // Ignore. + case *Pong: + // Ignore. 
+ case *Message: + timer.Reset(c.chanSendTimeout) + select { + case c.msgCh <- msg: + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + internal.Logger.Printf( + ctx, "redis: %s channel is full for %s (message is dropped)", + c, c.chanSendTimeout) + } + default: + internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg) + } + } + }() +} + +// initAllChan must be in sync with initMsgChan. +func (c *channel) initAllChan() { + ctx := context.TODO() + c.allCh = make(chan interface{}, c.chanSize) + + go func() { + timer := time.NewTimer(time.Minute) + timer.Stop() + + var errCount int + for { + msg, err := c.pubSub.Receive(ctx) + if err != nil { + if err == pool.ErrClosed { + close(c.allCh) + return + } + if errCount > 0 { + time.Sleep(100 * time.Millisecond) + } + errCount++ + continue + } + + errCount = 0 + + // Any message is as good as a ping. + select { + case c.ping <- struct{}{}: + default: + } + + switch msg := msg.(type) { + case *Pong: + // Ignore. + case *Subscription, *Message: + timer.Reset(c.chanSendTimeout) + select { + case c.allCh <- msg: + if !timer.Stop() { + <-timer.C + } + case <-timer.C: + internal.Logger.Printf( + ctx, "redis: %s channel is full for %s (message is dropped)", + c, c.chanSendTimeout) + } + default: + internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg) + } + } + }() +} diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go new file mode 100644 index 0000000000..bcf8a2a94b --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/redis.go @@ -0,0 +1,773 @@ +package redis + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" +) + +// Nil reply returned by Redis when key does not exist. 
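+//
+// For example (a sketch; "missing" is a placeholder key):
+//
+//	val, err := rdb.Get(ctx, "missing").Result()
+//	if err == redis.Nil {
+//		// the key does not exist; this is not a network error
+//	}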
+const Nil = proto.Nil + +func SetLogger(logger internal.Logging) { + internal.Logger = logger +} + +//------------------------------------------------------------------------------ + +type Hook interface { + BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) + AfterProcess(ctx context.Context, cmd Cmder) error + + BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) + AfterProcessPipeline(ctx context.Context, cmds []Cmder) error +} + +type hooks struct { + hooks []Hook +} + +func (hs *hooks) lock() { + hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)] +} + +func (hs hooks) clone() hooks { + clone := hs + clone.lock() + return clone +} + +func (hs *hooks) AddHook(hook Hook) { + hs.hooks = append(hs.hooks, hook) +} + +func (hs hooks) process( + ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error, +) error { + if len(hs.hooks) == 0 { + err := fn(ctx, cmd) + cmd.SetErr(err) + return err + } + + var hookIndex int + var retErr error + + for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ { + ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd) + if retErr != nil { + cmd.SetErr(retErr) + } + } + + if retErr == nil { + retErr = fn(ctx, cmd) + cmd.SetErr(retErr) + } + + for hookIndex--; hookIndex >= 0; hookIndex-- { + if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil { + retErr = err + cmd.SetErr(retErr) + } + } + + return retErr +} + +func (hs hooks) processPipeline( + ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error, +) error { + if len(hs.hooks) == 0 { + err := fn(ctx, cmds) + return err + } + + var hookIndex int + var retErr error + + for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ { + ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds) + if retErr != nil { + setCmdsErr(cmds, retErr) + } + } + + if retErr == nil { + retErr = fn(ctx, cmds) + } + + for hookIndex--; hookIndex >= 0; hookIndex-- { + if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil { + retErr = err + setCmdsErr(cmds, retErr) + } + } + + return retErr +} + +func (hs hooks) processTxPipeline( + ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error, +) error { + cmds = wrapMultiExec(ctx, cmds) + return hs.processPipeline(ctx, cmds, fn) +} + +//------------------------------------------------------------------------------ + +type baseClient struct { + opt *Options + connPool pool.Pooler + + onClose func() error // hook called when client is closed +} + +func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient { + return &baseClient{ + opt: opt, + connPool: connPool, + } +} + +func (c *baseClient) clone() *baseClient { + clone := *c + return &clone +} + +func (c *baseClient) withTimeout(timeout time.Duration) *baseClient { + opt := c.opt.clone() + opt.ReadTimeout = timeout + opt.WriteTimeout = timeout + + clone := c.clone() + clone.opt = opt + + return clone +} + +func (c *baseClient) String() string { + return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB) +} + +func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) { + cn, err := c.connPool.NewConn(ctx) + if err != nil { + return nil, err + } + + err = c.initConn(ctx, cn) + if err != nil { + _ = c.connPool.CloseConn(cn) + return nil, err + } + + return cn, nil +} + +func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { + if c.opt.Limiter != nil { + err := c.opt.Limiter.Allow() + if err != nil { + return nil, err + } + } + + cn, err := 
c._getConn(ctx) + if err != nil { + if c.opt.Limiter != nil { + c.opt.Limiter.ReportResult(err) + } + return nil, err + } + + return cn, nil +} + +func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) { + cn, err := c.connPool.Get(ctx) + if err != nil { + return nil, err + } + + if cn.Inited { + return cn, nil + } + + if err := c.initConn(ctx, cn); err != nil { + c.connPool.Remove(ctx, cn, err) + if err := errors.Unwrap(err); err != nil { + return nil, err + } + return nil, err + } + + return cn, nil +} + +func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error { + if cn.Inited { + return nil + } + cn.Inited = true + + if c.opt.Password == "" && + c.opt.DB == 0 && + !c.opt.readOnly && + c.opt.OnConnect == nil { + return nil + } + + connPool := pool.NewSingleConnPool(c.connPool, cn) + conn := newConn(ctx, c.opt, connPool) + + _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error { + if c.opt.Password != "" { + if c.opt.Username != "" { + pipe.AuthACL(ctx, c.opt.Username, c.opt.Password) + } else { + pipe.Auth(ctx, c.opt.Password) + } + } + + if c.opt.DB > 0 { + pipe.Select(ctx, c.opt.DB) + } + + if c.opt.readOnly { + pipe.ReadOnly(ctx) + } + + return nil + }) + if err != nil { + return err + } + + if c.opt.OnConnect != nil { + return c.opt.OnConnect(ctx, conn) + } + return nil +} + +func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) { + if c.opt.Limiter != nil { + c.opt.Limiter.ReportResult(err) + } + + if isBadConn(err, false, c.opt.Addr) { + c.connPool.Remove(ctx, cn, err) + } else { + c.connPool.Put(ctx, cn) + } +} + +func (c *baseClient) withConn( + ctx context.Context, fn func(context.Context, *pool.Conn) error, +) error { + cn, err := c.getConn(ctx) + if err != nil { + return err + } + + defer func() { + c.releaseConn(ctx, cn, err) + }() + + done := ctx.Done() //nolint:ifshort + + if done == nil { + err = fn(ctx, cn) + return err + } + + errc := make(chan error, 1) + go func() { errc <- fn(ctx, cn) }() + + select { + case <-done: + _ = cn.Close() + // Wait for the goroutine to finish and send something. 
+ <-errc + + err = ctx.Err() + return err + case err = <-errc: + return err + } +} + +func (c *baseClient) process(ctx context.Context, cmd Cmder) error { + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + attempt := attempt + + retry, err := c._process(ctx, cmd, attempt) + if err == nil || !retry { + return err + } + + lastErr = err + } + return lastErr +} + +func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return false, err + } + } + + retryTimeout := uint32(1) + err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmd(wr, cmd) + }) + if err != nil { + return err + } + + err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply) + if err != nil { + if cmd.readTimeout() == nil { + atomic.StoreUint32(&retryTimeout, 1) + } + return err + } + + return nil + }) + if err == nil { + return false, nil + } + + retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1) + return retry, err +} + +func (c *baseClient) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { + if timeout := cmd.readTimeout(); timeout != nil { + t := *timeout + if t == 0 { + return 0 + } + return t + 10*time.Second + } + return c.opt.ReadTimeout +} + +// Close closes the client, releasing any open resources. +// +// It is rare to Close a Client, as the Client is meant to be +// long-lived and shared between many goroutines. +func (c *baseClient) Close() error { + var firstErr error + if c.onClose != nil { + if err := c.onClose(); err != nil { + firstErr = err + } + } + if err := c.connPool.Close(); err != nil && firstErr == nil { + firstErr = err + } + return firstErr +} + +func (c *baseClient) getAddr() string { + return c.opt.Addr +} + +func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds) +} + +func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds) +} + +type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error) + +func (c *baseClient) generalProcessPipeline( + ctx context.Context, cmds []Cmder, p pipelineProcessor, +) error { + err := c._generalProcessPipeline(ctx, cmds, p) + if err != nil { + setCmdsErr(cmds, err) + return err + } + return cmdsFirstErr(cmds) +} + +func (c *baseClient) _generalProcessPipeline( + ctx context.Context, cmds []Cmder, p pipelineProcessor, +) error { + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + var canRetry bool + lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error { + var err error + canRetry, err = p(ctx, cn, cmds) + return err + }) + if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) { + return lastErr + } + } + return lastErr +} + +func (c *baseClient) pipelineProcessCmds( + ctx context.Context, cn *pool.Conn, cmds []Cmder, +) (bool, error) { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return 
true, err + } + + err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + return pipelineReadCmds(rd, cmds) + }) + return true, err +} + +func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { + for _, cmd := range cmds { + err := cmd.readReply(rd) + cmd.SetErr(err) + if err != nil && !isRedisError(err) { + return err + } + } + return nil +} + +func (c *baseClient) txPipelineProcessCmds( + ctx context.Context, cn *pool.Conn, cmds []Cmder, +) (bool, error) { + err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error { + return writeCmds(wr, cmds) + }) + if err != nil { + return true, err + } + + err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error { + statusCmd := cmds[0].(*StatusCmd) + // Trim multi and exec. + cmds = cmds[1 : len(cmds)-1] + + err := txPipelineReadQueued(rd, statusCmd, cmds) + if err != nil { + return err + } + + return pipelineReadCmds(rd, cmds) + }) + return false, err +} + +func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder { + if len(cmds) == 0 { + panic("not reached") + } + cmdCopy := make([]Cmder, len(cmds)+2) + cmdCopy[0] = NewStatusCmd(ctx, "multi") + copy(cmdCopy[1:], cmds) + cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec") + return cmdCopy +} + +func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error { + // Parse queued replies. + if err := statusCmd.readReply(rd); err != nil { + return err + } + + for range cmds { + if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) { + return err + } + } + + // Parse number of replies. + line, err := rd.ReadLine() + if err != nil { + if err == Nil { + err = TxFailedErr + } + return err + } + + switch line[0] { + case proto.ErrorReply: + return proto.ParseErrorReply(line) + case proto.ArrayReply: + // ok + default: + err := fmt.Errorf("redis: expected '*', but got line %q", line) + return err + } + + return nil +} + +//------------------------------------------------------------------------------ + +// Client is a Redis client representing a pool of zero or more +// underlying connections. It's safe for concurrent use by multiple +// goroutines. +type Client struct { + *baseClient + cmdable + hooks + ctx context.Context +} + +// NewClient returns a client to the Redis Server specified by Options. +func NewClient(opt *Options) *Client { + opt.init() + + c := Client{ + baseClient: newBaseClient(opt, newConnPool(opt)), + ctx: context.Background(), + } + c.cmdable = c.Process + + return &c +} + +func (c *Client) clone() *Client { + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + return &clone +} + +func (c *Client) WithTimeout(timeout time.Duration) *Client { + clone := c.clone() + clone.baseClient = c.baseClient.withTimeout(timeout) + return clone +} + +func (c *Client) Context() context.Context { + return c.ctx +} + +func (c *Client) WithContext(ctx context.Context) *Client { + if ctx == nil { + panic("nil context") + } + clone := c.clone() + clone.ctx = ctx + return clone +} + +func (c *Client) Conn(ctx context.Context) *Conn { + return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool)) +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) 
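+	// Any error is recorded on the returned Cmd, so callers read it via
+	// cmd.Err() or cmd.Result() rather than from Process; hence the
+	// deliberately discarded return value below.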
+ _ = c.Process(ctx, cmd) + return cmd +} + +func (c *Client) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) +} + +func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) +} + +// Options returns read-only Options that were used to create the client. +func (c *Client) Options() *Options { + return c.opt +} + +type PoolStats pool.Stats + +// PoolStats returns connection pool stats. +func (c *Client) PoolStats() *PoolStats { + stats := c.connPool.Stats() + return (*PoolStats)(stats) +} + +func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *Client) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *Client) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *Client) pubSub() *PubSub { + pubsub := &PubSub{ + opt: c.opt, + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + return c.newConn(ctx) + }, + closeConn: c.connPool.CloseConn, + } + pubsub.init() + return pubsub +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. +// Note that this method does not wait on a response from Redis, so the +// subscription may not be active immediately. To force the connection to wait, +// you may call the Receive() method on the returned *PubSub like so: +// +// sub := client.Subscribe(queryResp) +// iface, err := sub.Receive() +// if err != nil { +// // handle error +// } +// +// // Should be *Subscription, but others are possible if other actions have been +// // taken on sub since it was created. +// switch iface.(type) { +// case *Subscription: +// // subscribe succeeded +// case *Message: +// // received first message +// case *Pong: +// // pong received +// default: +// // handle error +// } +// +// ch := sub.Channel() +func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +//------------------------------------------------------------------------------ + +type conn struct { + baseClient + cmdable + statefulCmdable + hooks // TODO: inherit hooks +} + +// Conn represents a single Redis connection rather than a pool of connections. +// Prefer running commands from Client unless there is a specific need +// for a continuous single Redis connection. 
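+//
+// For example (a sketch; the client name is a placeholder):
+//
+//	conn := rdb.Conn(ctx)
+//	defer conn.Close()
+//	conn.ClientSetName(ctx, "worker-1") // sticks to this one connection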
+type Conn struct { + *conn + ctx context.Context +} + +func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn { + c := Conn{ + conn: &conn{ + baseClient: baseClient{ + opt: opt, + connPool: connPool, + }, + }, + ctx: ctx, + } + c.cmdable = c.Process + c.statefulCmdable = c.Process + return &c +} + +func (c *Conn) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) +} + +func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) +} + +func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *Conn) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC. +func (c *Conn) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go new file mode 100644 index 0000000000..24cfd49940 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/result.go @@ -0,0 +1,180 @@ +package redis + +import "time" + +// NewCmdResult returns a Cmd initialised with val and err for testing. +func NewCmdResult(val interface{}, err error) *Cmd { + var cmd Cmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewSliceResult returns a SliceCmd initialised with val and err for testing. +func NewSliceResult(val []interface{}, err error) *SliceCmd { + var cmd SliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStatusResult returns a StatusCmd initialised with val and err for testing. +func NewStatusResult(val string, err error) *StatusCmd { + var cmd StatusCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewIntResult returns an IntCmd initialised with val and err for testing. +func NewIntResult(val int64, err error) *IntCmd { + var cmd IntCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewDurationResult returns a DurationCmd initialised with val and err for testing. +func NewDurationResult(val time.Duration, err error) *DurationCmd { + var cmd DurationCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewBoolResult returns a BoolCmd initialised with val and err for testing. +func NewBoolResult(val bool, err error) *BoolCmd { + var cmd BoolCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringResult returns a StringCmd initialised with val and err for testing. +func NewStringResult(val string, err error) *StringCmd { + var cmd StringCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewFloatResult returns a FloatCmd initialised with val and err for testing. +func NewFloatResult(val float64, err error) *FloatCmd { + var cmd FloatCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing. 
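+//
+// For example, when stubbing replies in tests (a sketch):
+//
+//	cmd := redis.NewStringSliceResult([]string{"a", "b"}, nil)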
+func NewStringSliceResult(val []string, err error) *StringSliceCmd { + var cmd StringSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing. +func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { + var cmd BoolSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing. +func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd { + var cmd StringStringMapCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing. +func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd { + var cmd StringIntMapCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing. +func NewTimeCmdResult(val time.Time, err error) *TimeCmd { + var cmd TimeCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing. +func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd { + var cmd ZSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing. +func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd { + var cmd ZWithKeyCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewScanCmdResult returns a ScanCmd initialised with val and err for testing. +func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd { + var cmd ScanCmd + cmd.page = keys + cmd.cursor = cursor + cmd.SetErr(err) + return &cmd +} + +// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing. +func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd { + var cmd ClusterSlotsCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing. +func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd { + var cmd GeoLocationCmd + cmd.locations = val + cmd.SetErr(err) + return &cmd +} + +// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing. +func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd { + var cmd GeoPosCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing. +func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd { + var cmd CommandsInfoCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing. +func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd { + var cmd XMessageSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} + +// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing. 
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd { + var cmd XStreamSliceCmd + cmd.val = val + cmd.SetErr(err) + return &cmd +} diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go new file mode 100644 index 0000000000..4df00fc857 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/ring.go @@ -0,0 +1,736 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/cespare/xxhash/v2" + rendezvous "github.com/dgryski/go-rendezvous" //nolint + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/hashtag" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/rand" +) + +var errRingShardsDown = errors.New("redis: all ring shards are down") + +//------------------------------------------------------------------------------ + +type ConsistentHash interface { + Get(string) string +} + +type rendezvousWrapper struct { + *rendezvous.Rendezvous +} + +func (w rendezvousWrapper) Get(key string) string { + return w.Lookup(key) +} + +func newRendezvous(shards []string) ConsistentHash { + return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)} +} + +//------------------------------------------------------------------------------ + +// RingOptions are used to configure a ring client and should be +// passed to NewRing. +type RingOptions struct { + // Map of name => host:port addresses of ring shards. + Addrs map[string]string + + // NewClient creates a shard client with provided name and options. + NewClient func(name string, opt *Options) *Client + + // Frequency of PING commands sent to check shards availability. + // Shard is considered down after 3 subsequent failed checks. + HeartbeatFrequency time.Duration + + // NewConsistentHash returns a consistent hash that is used + // to distribute keys across the shards. + // + // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8 + // for consistent hashing algorithmic tradeoffs. + NewConsistentHash func(shards []string) ConsistentHash + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + DB int + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). 
+ PoolFIFO bool + + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config + Limiter Limiter +} + +func (opt *RingOptions) init() { + if opt.NewClient == nil { + opt.NewClient = func(name string, opt *Options) *Client { + return NewClient(opt) + } + } + + if opt.HeartbeatFrequency == 0 { + opt.HeartbeatFrequency = 500 * time.Millisecond + } + + if opt.NewConsistentHash == nil { + opt.NewConsistentHash = newRendezvous + } + + if opt.MaxRetries == -1 { + opt.MaxRetries = 0 + } else if opt.MaxRetries == 0 { + opt.MaxRetries = 3 + } + switch opt.MinRetryBackoff { + case -1: + opt.MinRetryBackoff = 0 + case 0: + opt.MinRetryBackoff = 8 * time.Millisecond + } + switch opt.MaxRetryBackoff { + case -1: + opt.MaxRetryBackoff = 0 + case 0: + opt.MaxRetryBackoff = 512 * time.Millisecond + } +} + +func (opt *RingOptions) clientOptions() *Options { + return &Options{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Username: opt.Username, + Password: opt.Password, + DB: opt.DB, + + MaxRetries: -1, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + + TLSConfig: opt.TLSConfig, + Limiter: opt.Limiter, + } +} + +//------------------------------------------------------------------------------ + +type ringShard struct { + Client *Client + down int32 +} + +func newRingShard(opt *RingOptions, name, addr string) *ringShard { + clopt := opt.clientOptions() + clopt.Addr = addr + + return &ringShard{ + Client: opt.NewClient(name, clopt), + } +} + +func (shard *ringShard) String() string { + var state string + if shard.IsUp() { + state = "up" + } else { + state = "down" + } + return fmt.Sprintf("%s is %s", shard.Client, state) +} + +func (shard *ringShard) IsDown() bool { + const threshold = 3 + return atomic.LoadInt32(&shard.down) >= threshold +} + +func (shard *ringShard) IsUp() bool { + return !shard.IsDown() +} + +// Vote votes to set shard state and returns true if state was changed. 
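+// A down vote only increments a failure counter: the shard is reported
+// down after three consecutive failures (see the threshold in IsDown),
+// so a single failed heartbeat does not remove it from the ring.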
+func (shard *ringShard) Vote(up bool) bool { + if up { + changed := shard.IsDown() + atomic.StoreInt32(&shard.down, 0) + return changed + } + + if shard.IsDown() { + return false + } + + atomic.AddInt32(&shard.down, 1) + return shard.IsDown() +} + +//------------------------------------------------------------------------------ + +type ringShards struct { + opt *RingOptions + + mu sync.RWMutex + hash ConsistentHash + shards map[string]*ringShard // read only + list []*ringShard // read only + numShard int + closed bool +} + +func newRingShards(opt *RingOptions) *ringShards { + shards := make(map[string]*ringShard, len(opt.Addrs)) + list := make([]*ringShard, 0, len(shards)) + + for name, addr := range opt.Addrs { + shard := newRingShard(opt, name, addr) + shards[name] = shard + + list = append(list, shard) + } + + c := &ringShards{ + opt: opt, + + shards: shards, + list: list, + } + c.rebalance() + + return c +} + +func (c *ringShards) List() []*ringShard { + var list []*ringShard + + c.mu.RLock() + if !c.closed { + list = c.list + } + c.mu.RUnlock() + + return list +} + +func (c *ringShards) Hash(key string) string { + key = hashtag.Key(key) + + var hash string + + c.mu.RLock() + if c.numShard > 0 { + hash = c.hash.Get(key) + } + c.mu.RUnlock() + + return hash +} + +func (c *ringShards) GetByKey(key string) (*ringShard, error) { + key = hashtag.Key(key) + + c.mu.RLock() + + if c.closed { + c.mu.RUnlock() + return nil, pool.ErrClosed + } + + if c.numShard == 0 { + c.mu.RUnlock() + return nil, errRingShardsDown + } + + hash := c.hash.Get(key) + if hash == "" { + c.mu.RUnlock() + return nil, errRingShardsDown + } + + shard := c.shards[hash] + c.mu.RUnlock() + + return shard, nil +} + +func (c *ringShards) GetByName(shardName string) (*ringShard, error) { + if shardName == "" { + return c.Random() + } + + c.mu.RLock() + shard := c.shards[shardName] + c.mu.RUnlock() + return shard, nil +} + +func (c *ringShards) Random() (*ringShard, error) { + return c.GetByKey(strconv.Itoa(rand.Int())) +} + +// heartbeat monitors state of each shard in the ring. +func (c *ringShards) Heartbeat(frequency time.Duration) { + ticker := time.NewTicker(frequency) + defer ticker.Stop() + + ctx := context.Background() + for range ticker.C { + var rebalance bool + + for _, shard := range c.List() { + err := shard.Client.Ping(ctx).Err() + isUp := err == nil || err == pool.ErrPoolTimeout + if shard.Vote(isUp) { + internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard) + rebalance = true + } + } + + if rebalance { + c.rebalance() + } + } +} + +// rebalance removes dead shards from the Ring. 
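+// It rebuilds the consistent hash from the shards that are currently up
+// and swaps it in under the write lock, so concurrent readers see either
+// the old hash or the new one, never a partially built one.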
+func (c *ringShards) rebalance() { + c.mu.RLock() + shards := c.shards + c.mu.RUnlock() + + liveShards := make([]string, 0, len(shards)) + + for name, shard := range shards { + if shard.IsUp() { + liveShards = append(liveShards, name) + } + } + + hash := c.opt.NewConsistentHash(liveShards) + + c.mu.Lock() + c.hash = hash + c.numShard = len(liveShards) + c.mu.Unlock() +} + +func (c *ringShards) Len() int { + c.mu.RLock() + l := c.numShard + c.mu.RUnlock() + return l +} + +func (c *ringShards) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + if c.closed { + return nil + } + c.closed = true + + var firstErr error + for _, shard := range c.shards { + if err := shard.Client.Close(); err != nil && firstErr == nil { + firstErr = err + } + } + c.hash = nil + c.shards = nil + c.list = nil + + return firstErr +} + +//------------------------------------------------------------------------------ + +type ring struct { + opt *RingOptions + shards *ringShards + cmdsInfoCache *cmdsInfoCache //nolint:structcheck +} + +// Ring is a Redis client that uses consistent hashing to distribute +// keys across multiple Redis servers (shards). It's safe for +// concurrent use by multiple goroutines. +// +// Ring monitors the state of each shard and removes dead shards from +// the ring. When a shard comes online it is added back to the ring. This +// gives you maximum availability and partition tolerance, but no +// consistency between different shards or even clients. Each client +// uses shards that are available to the client and does not do any +// coordination when shard state is changed. +// +// Ring should be used when you need multiple Redis servers for caching +// and can tolerate losing data when one of the servers dies. +// Otherwise you should use Redis Cluster. +type Ring struct { + *ring + cmdable + hooks + ctx context.Context +} + +func NewRing(opt *RingOptions) *Ring { + opt.init() + + ring := Ring{ + ring: &ring{ + opt: opt, + shards: newRingShards(opt), + }, + ctx: context.Background(), + } + + ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo) + ring.cmdable = ring.Process + + go ring.shards.Heartbeat(opt.HeartbeatFrequency) + + return &ring +} + +func (c *Ring) Context() context.Context { + return c.ctx +} + +func (c *Ring) WithContext(ctx context.Context) *Ring { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.cmdable = clone.Process + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +// Do creates a Cmd from the args and processes the cmd. +func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd { + cmd := NewCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *Ring) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.process) +} + +// Options returns read-only Options that were used to create the client. +func (c *Ring) Options() *RingOptions { + return c.opt +} + +func (c *Ring) retryBackoff(attempt int) time.Duration { + return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff) +} + +// PoolStats returns accumulated connection pool stats. +func (c *Ring) PoolStats() *PoolStats { + shards := c.shards.List() + var acc PoolStats + for _, shard := range shards { + s := shard.Client.connPool.Stats() + acc.Hits += s.Hits + acc.Misses += s.Misses + acc.Timeouts += s.Timeouts + acc.TotalConns += s.TotalConns + acc.IdleConns += s.IdleConns + } + return &acc +} + +// Len returns the current number of shards in the ring. 
+func (c *Ring) Len() int { + return c.shards.Len() +} + +// Subscribe subscribes the client to the specified channels. +func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub { + if len(channels) == 0 { + panic("at least one channel is required") + } + + shard, err := c.shards.GetByKey(channels[0]) + if err != nil { + // TODO: return PubSub with sticky error + panic(err) + } + return shard.Client.Subscribe(ctx, channels...) +} + +// PSubscribe subscribes the client to the given patterns. +func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub { + if len(channels) == 0 { + panic("at least one channel is required") + } + + shard, err := c.shards.GetByKey(channels[0]) + if err != nil { + // TODO: return PubSub with sticky error + panic(err) + } + return shard.Client.PSubscribe(ctx, channels...) +} + +// ForEachShard concurrently calls the fn on each live shard in the ring. +// It returns the first error if any. +func (c *Ring) ForEachShard( + ctx context.Context, + fn func(ctx context.Context, client *Client) error, +) error { + shards := c.shards.List() + var wg sync.WaitGroup + errCh := make(chan error, 1) + for _, shard := range shards { + if shard.IsDown() { + continue + } + + wg.Add(1) + go func(shard *ringShard) { + defer wg.Done() + err := fn(ctx, shard.Client) + if err != nil { + select { + case errCh <- err: + default: + } + } + }(shard) + } + wg.Wait() + + select { + case err := <-errCh: + return err + default: + return nil + } +} + +func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { + shards := c.shards.List() + var firstErr error + for _, shard := range shards { + cmdsInfo, err := shard.Client.Command(ctx).Result() + if err == nil { + return cmdsInfo, nil + } + if firstErr == nil { + firstErr = err + } + } + if firstErr == nil { + return nil, errRingShardsDown + } + return nil, firstErr +} + +func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo { + cmdsInfo, err := c.cmdsInfoCache.Get(ctx) + if err != nil { + return nil + } + info := cmdsInfo[name] + if info == nil { + internal.Logger.Printf(ctx, "info for cmd=%s not found", name) + } + return info +} + +func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) { + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + pos := cmdFirstKeyPos(cmd, cmdInfo) + if pos == 0 { + return c.shards.Random() + } + firstKey := cmd.stringArg(pos) + return c.shards.GetByKey(firstKey) +} + +func (c *Ring) process(ctx context.Context, cmd Cmder) error { + var lastErr error + for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ { + if attempt > 0 { + if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { + return err + } + } + + shard, err := c.cmdShard(ctx, cmd) + if err != nil { + return err + } + + lastErr = shard.Client.Process(ctx, cmd) + if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) { + return lastErr + } + } + return lastErr +} + +func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +func (c *Ring) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processPipeline, + } + pipe.init() + return &pipe +} + +func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, false) + }) +} + +func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, 
error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +func (c *Ring) TxPipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: c.processTxPipeline, + } + pipe.init() + return &pipe +} + +func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error { + return c.generalProcessPipeline(ctx, cmds, true) + }) +} + +func (c *Ring) generalProcessPipeline( + ctx context.Context, cmds []Cmder, tx bool, +) error { + cmdsMap := make(map[string][]Cmder) + for _, cmd := range cmds { + cmdInfo := c.cmdInfo(ctx, cmd.Name()) + hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo)) + if hash != "" { + hash = c.shards.Hash(hash) + } + cmdsMap[hash] = append(cmdsMap[hash], cmd) + } + + var wg sync.WaitGroup + for hash, cmds := range cmdsMap { + wg.Add(1) + go func(hash string, cmds []Cmder) { + defer wg.Done() + + _ = c.processShardPipeline(ctx, hash, cmds, tx) + }(hash, cmds) + } + + wg.Wait() + return cmdsFirstErr(cmds) +} + +func (c *Ring) processShardPipeline( + ctx context.Context, hash string, cmds []Cmder, tx bool, +) error { + // TODO: retry? + shard, err := c.shards.GetByName(hash) + if err != nil { + setCmdsErr(cmds, err) + return err + } + + if tx { + return shard.Client.processTxPipeline(ctx, cmds) + } + return shard.Client.processPipeline(ctx, cmds) +} + +func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + if len(keys) == 0 { + return fmt.Errorf("redis: Watch requires at least one key") + } + + var shards []*ringShard + for _, key := range keys { + if key != "" { + shard, err := c.shards.GetByKey(hashtag.Key(key)) + if err != nil { + return err + } + + shards = append(shards, shard) + } + } + + if len(shards) == 0 { + return fmt.Errorf("redis: Watch requires at least one shard") + } + + if len(shards) > 1 { + for _, shard := range shards[1:] { + if shard.Client != shards[0].Client { + err := fmt.Errorf("redis: Watch requires all keys to be in the same shard") + return err + } + } + } + + return shards[0].Client.Watch(ctx, fn, keys...) +} + +// Close closes the ring client, releasing any open resources. +// +// It is rare to Close a Ring, as the Ring is meant to be long-lived +// and shared between many goroutines. 
+func (c *Ring) Close() error { + return c.shards.Close() +} diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go new file mode 100644 index 0000000000..5cab18d617 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/script.go @@ -0,0 +1,65 @@ +package redis + +import ( + "context" + "crypto/sha1" + "encoding/hex" + "io" + "strings" +) + +type Scripter interface { + Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd + EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd + ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd + ScriptLoad(ctx context.Context, script string) *StringCmd +} + +var ( + _ Scripter = (*Client)(nil) + _ Scripter = (*Ring)(nil) + _ Scripter = (*ClusterClient)(nil) +) + +type Script struct { + src, hash string +} + +func NewScript(src string) *Script { + h := sha1.New() + _, _ = io.WriteString(h, src) + return &Script{ + src: src, + hash: hex.EncodeToString(h.Sum(nil)), + } +} + +func (s *Script) Hash() string { + return s.hash +} + +func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd { + return c.ScriptLoad(ctx, s.src) +} + +func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd { + return c.ScriptExists(ctx, s.hash) +} + +func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { + return c.Eval(ctx, s.src, keys, args...) +} + +func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { + return c.EvalSha(ctx, s.hash, keys, args...) +} + +// Run optimistically uses EVALSHA to run the script. If script does not exist +// it is retried using EVAL. +func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { + r := s.EvalSha(ctx, c, keys, args...) + if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { + return s.Eval(ctx, c, keys, args...) + } + return r +} diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go new file mode 100644 index 0000000000..ec6221dc83 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/sentinel.go @@ -0,0 +1,796 @@ +package redis + +import ( + "context" + "crypto/tls" + "errors" + "net" + "strings" + "sync" + "time" + + "github.com/go-redis/redis/v8/internal" + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/rand" +) + +//------------------------------------------------------------------------------ + +// FailoverOptions are used to configure a failover client and should +// be passed to NewFailoverClient. +type FailoverOptions struct { + // The master name. + MasterName string + // A seed list of host:port addresses of sentinel nodes. + SentinelAddrs []string + + // If specified with SentinelPassword, enables ACL-based authentication (via + // AUTH <user> <pass>). + SentinelUsername string + // Sentinel password from "requirepass <password>" (if enabled) in Sentinel + // configuration, or, if SentinelUsername is also supplied, used for ACL-based + // authentication. + SentinelPassword string + + // Allows routing read-only commands to the closest master or slave node. + // This option only works with NewFailoverClusterClient. + RouteByLatency bool + // Allows routing read-only commands to the random master or slave node. + // This option only works with NewFailoverClusterClient. 
+ RouteRandomly bool + + // Route all commands to slave read-only nodes. + SlaveOnly bool + + // Use slaves disconnected with master when cannot get connected slaves + // Now, this option only works in RandomSlaveAddr function. + UseDisconnectedSlaves bool + + // Following options are copied from Options struct. + + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + OnConnect func(ctx context.Context, cn *Conn) error + + Username string + Password string + DB int + + MaxRetries int + MinRetryBackoff time.Duration + MaxRetryBackoff time.Duration + + DialTimeout time.Duration + ReadTimeout time.Duration + WriteTimeout time.Duration + + // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO). + PoolFIFO bool + + PoolSize int + MinIdleConns int + MaxConnAge time.Duration + PoolTimeout time.Duration + IdleTimeout time.Duration + IdleCheckFrequency time.Duration + + TLSConfig *tls.Config +} + +func (opt *FailoverOptions) clientOptions() *Options { + return &Options{ + Addr: "FailoverClient", + + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + DB: opt.DB, + Username: opt.Username, + Password: opt.Password, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + + TLSConfig: opt.TLSConfig, + } +} + +func (opt *FailoverOptions) sentinelOptions(addr string) *Options { + return &Options{ + Addr: addr, + + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + DB: 0, + Username: opt.SentinelUsername, + Password: opt.SentinelPassword, + + MaxRetries: opt.MaxRetries, + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + + TLSConfig: opt.TLSConfig, + } +} + +func (opt *FailoverOptions) clusterOptions() *ClusterOptions { + return &ClusterOptions{ + Dialer: opt.Dialer, + OnConnect: opt.OnConnect, + + Username: opt.Username, + Password: opt.Password, + + MaxRedirects: opt.MaxRetries, + + RouteByLatency: opt.RouteByLatency, + RouteRandomly: opt.RouteRandomly, + + MinRetryBackoff: opt.MinRetryBackoff, + MaxRetryBackoff: opt.MaxRetryBackoff, + + DialTimeout: opt.DialTimeout, + ReadTimeout: opt.ReadTimeout, + WriteTimeout: opt.WriteTimeout, + + PoolFIFO: opt.PoolFIFO, + PoolSize: opt.PoolSize, + PoolTimeout: opt.PoolTimeout, + IdleTimeout: opt.IdleTimeout, + IdleCheckFrequency: opt.IdleCheckFrequency, + MinIdleConns: opt.MinIdleConns, + MaxConnAge: opt.MaxConnAge, + + TLSConfig: opt.TLSConfig, + } +} + +// NewFailoverClient returns a Redis client that uses Redis Sentinel +// for automatic failover. It's safe for concurrent use by multiple +// goroutines. 
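+//
+// A minimal usage sketch ("mymaster", the sentinel address, and ctx are
+// placeholders supplied by the caller):
+//
+//	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
+//		MasterName:    "mymaster",
+//		SentinelAddrs: []string{"127.0.0.1:26379"},
+//	})
+//	defer rdb.Close()
+//	err := rdb.Ping(ctx).Err()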
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client { + if failoverOpt.RouteByLatency { + panic("to route commands by latency, use NewFailoverClusterClient") + } + if failoverOpt.RouteRandomly { + panic("to route commands randomly, use NewFailoverClusterClient") + } + + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) + copy(sentinelAddrs, failoverOpt.SentinelAddrs) + + rand.Shuffle(len(sentinelAddrs), func(i, j int) { + sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i] + }) + + failover := &sentinelFailover{ + opt: failoverOpt, + sentinelAddrs: sentinelAddrs, + } + + opt := failoverOpt.clientOptions() + opt.Dialer = masterSlaveDialer(failover) + opt.init() + + connPool := newConnPool(opt) + + failover.mu.Lock() + failover.onFailover = func(ctx context.Context, addr string) { + _ = connPool.Filter(func(cn *pool.Conn) bool { + return cn.RemoteAddr().String() != addr + }) + } + failover.mu.Unlock() + + c := Client{ + baseClient: newBaseClient(opt, connPool), + ctx: context.Background(), + } + c.cmdable = c.Process + c.onClose = failover.Close + + return &c +} + +func masterSlaveDialer( + failover *sentinelFailover, +) func(ctx context.Context, network, addr string) (net.Conn, error) { + return func(ctx context.Context, network, _ string) (net.Conn, error) { + var addr string + var err error + + if failover.opt.SlaveOnly { + addr, err = failover.RandomSlaveAddr(ctx) + } else { + addr, err = failover.MasterAddr(ctx) + if err == nil { + failover.trySwitchMaster(ctx, addr) + } + } + if err != nil { + return nil, err + } + if failover.opt.Dialer != nil { + return failover.opt.Dialer(ctx, network, addr) + } + + netDialer := &net.Dialer{ + Timeout: failover.opt.DialTimeout, + KeepAlive: 5 * time.Minute, + } + if failover.opt.TLSConfig == nil { + return netDialer.DialContext(ctx, network, addr) + } + return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig) + } +} + +//------------------------------------------------------------------------------ + +// SentinelClient is a client for a Redis Sentinel. +type SentinelClient struct { + *baseClient + hooks + ctx context.Context +} + +func NewSentinelClient(opt *Options) *SentinelClient { + opt.init() + c := &SentinelClient{ + baseClient: &baseClient{ + opt: opt, + connPool: newConnPool(opt), + }, + ctx: context.Background(), + } + return c +} + +func (c *SentinelClient) Context() context.Context { + return c.ctx +} + +func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.ctx = ctx + return &clone +} + +func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +func (c *SentinelClient) pubSub() *PubSub { + pubsub := &PubSub{ + opt: c.opt, + + newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { + return c.newConn(ctx) + }, + closeConn: c.connPool.CloseConn, + } + pubsub.init() + return pubsub +} + +// Ping is used to test if a connection is still alive, or to +// measure latency. +func (c *SentinelClient) Ping(ctx context.Context) *StringCmd { + cmd := NewStringCmd(ctx, "ping") + _ = c.Process(ctx, cmd) + return cmd +} + +// Subscribe subscribes the client to the specified channels. +// Channels can be omitted to create empty subscription. 
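+//
+// A short sketch of watching failover announcements (the address and ctx
+// are placeholders):
+//
+//	sc := redis.NewSentinelClient(&redis.Options{Addr: "127.0.0.1:26379"})
+//	pubsub := sc.Subscribe(ctx, "+switch-master")
+//	defer pubsub.Close()
+//	msg, err := pubsub.ReceiveMessage(ctx)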
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.Subscribe(ctx, channels...) + } + return pubsub +} + +// PSubscribe subscribes the client to the given patterns. +// Patterns can be omitted to create empty subscription. +func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { + pubsub := c.pubSub() + if len(channels) > 0 { + _ = pubsub.PSubscribe(ctx, channels...) + } + return pubsub +} + +func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name) + _ = c.Process(ctx, cmd) + return cmd +} + +func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd { + cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Failover forces a failover as if the master was not reachable, and without +// asking for agreement to other Sentinels. +func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd { + cmd := NewStatusCmd(ctx, "sentinel", "failover", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Reset resets all the masters with matching name. The pattern argument is a +// glob-style pattern. The reset process clears any previous state in a master +// (including a failover in progress), and removes every slave and sentinel +// already discovered and associated with the master. +func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd { + cmd := NewIntCmd(ctx, "sentinel", "reset", pattern) + _ = c.Process(ctx, cmd) + return cmd +} + +// FlushConfig forces Sentinel to rewrite its configuration on disk, including +// the current Sentinel state. +func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd { + cmd := NewStatusCmd(ctx, "sentinel", "flushconfig") + _ = c.Process(ctx, cmd) + return cmd +} + +// Master shows the state and info of the specified master. +func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd { + cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Masters shows a list of monitored masters and their state. +func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd { + cmd := NewSliceCmd(ctx, "sentinel", "masters") + _ = c.Process(ctx, cmd) + return cmd +} + +// Slaves shows a list of slaves for the specified master and their state. +func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd { + cmd := NewSliceCmd(ctx, "sentinel", "slaves", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// CkQuorum checks if the current Sentinel configuration is able to reach the +// quorum needed to failover a master, and the majority needed to authorize the +// failover. This command should be used in monitoring systems to check if a +// Sentinel deployment is ok. +func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name) + _ = c.Process(ctx, cmd) + return cmd +} + +// Monitor tells the Sentinel to start monitoring a new master with the specified +// name, ip, port, and quorum. 
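+//
+// For example (all arguments are placeholder strings; note that the port
+// and quorum are passed as strings, not integers):
+//
+//	sc.Monitor(ctx, "mymaster", "127.0.0.1", "6379", "2")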
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum) + _ = c.Process(ctx, cmd) + return cmd +} + +// Set is used in order to change configuration parameters of a specific master. +func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value) + _ = c.Process(ctx, cmd) + return cmd +} + +// Remove is used in order to remove the specified master: the master will no +// longer be monitored, and will totally be removed from the internal state of +// the Sentinel. +func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd { + cmd := NewStringCmd(ctx, "sentinel", "remove", name) + _ = c.Process(ctx, cmd) + return cmd +} + +//------------------------------------------------------------------------------ + +type sentinelFailover struct { + opt *FailoverOptions + + sentinelAddrs []string + + onFailover func(ctx context.Context, addr string) + onUpdate func(ctx context.Context) + + mu sync.RWMutex + _masterAddr string + sentinel *SentinelClient + pubsub *PubSub +} + +func (c *sentinelFailover) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + if c.sentinel != nil { + return c.closeSentinel() + } + return nil +} + +func (c *sentinelFailover) closeSentinel() error { + firstErr := c.pubsub.Close() + c.pubsub = nil + + err := c.sentinel.Close() + if err != nil && firstErr == nil { + firstErr = err + } + c.sentinel = nil + + return firstErr +} + +func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) { + if c.opt == nil { + return "", errors.New("opt is nil") + } + + addresses, err := c.slaveAddrs(ctx, false) + if err != nil { + return "", err + } + + if len(addresses) == 0 && c.opt.UseDisconnectedSlaves { + addresses, err = c.slaveAddrs(ctx, true) + if err != nil { + return "", err + } + } + + if len(addresses) == 0 { + return c.MasterAddr(ctx) + } + return addresses[rand.Intn(len(addresses))], nil +} + +func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) { + c.mu.RLock() + sentinel := c.sentinel + c.mu.RUnlock() + + if sentinel != nil { + addr := c.getMasterAddr(ctx, sentinel) + if addr != "" { + return addr, nil + } + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.sentinel != nil { + addr := c.getMasterAddr(ctx, c.sentinel) + if addr != "" { + return addr, nil + } + _ = c.closeSentinel() + } + + for i, sentinelAddr := range c.sentinelAddrs { + sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) + + masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s", + c.opt.MasterName, err) + _ = sentinel.Close() + continue + } + + // Push working sentinel to the top. 
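+	// Swapping it into index 0 means the next MasterAddr/slaveAddrs call
+	// tries the known-good sentinel first.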
+ c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinel) + + addr := net.JoinHostPort(masterAddr[0], masterAddr[1]) + return addr, nil + } + + return "", errors.New("redis: all sentinels specified in configuration are unreachable") +} + +func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) { + c.mu.RLock() + sentinel := c.sentinel + c.mu.RUnlock() + + if sentinel != nil { + addrs := c.getSlaveAddrs(ctx, sentinel) + if len(addrs) > 0 { + return addrs, nil + } + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.sentinel != nil { + addrs := c.getSlaveAddrs(ctx, c.sentinel) + if len(addrs) > 0 { + return addrs, nil + } + _ = c.closeSentinel() + } + + var sentinelReachable bool + + for i, sentinelAddr := range c.sentinelAddrs { + sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr)) + + slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s", + c.opt.MasterName, err) + _ = sentinel.Close() + continue + } + sentinelReachable = true + addrs := parseSlaveAddrs(slaves, useDisconnected) + if len(addrs) == 0 { + continue + } + // Push working sentinel to the top. + c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0] + c.setSentinel(ctx, sentinel) + + return addrs, nil + } + + if sentinelReachable { + return []string{}, nil + } + return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable") +} + +func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string { + addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s", + c.opt.MasterName, err) + return "" + } + return net.JoinHostPort(addr[0], addr[1]) +} + +func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string { + addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s", + c.opt.MasterName, err) + return []string{} + } + return parseSlaveAddrs(addrs, false) +} + +func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string { + nodes := make([]string, 0, len(addrs)) + for _, node := range addrs { + ip := "" + port := "" + flags := []string{} + lastkey := "" + isDown := false + + for _, key := range node.([]interface{}) { + switch lastkey { + case "ip": + ip = key.(string) + case "port": + port = key.(string) + case "flags": + flags = strings.Split(key.(string), ",") + } + lastkey = key.(string) + } + + for _, flag := range flags { + switch flag { + case "s_down", "o_down": + isDown = true + case "disconnected": + if !keepDisconnected { + isDown = true + } + } + } + + if !isDown { + nodes = append(nodes, net.JoinHostPort(ip, port)) + } + } + + return nodes +} + +func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { + c.mu.RLock() + currentAddr := c._masterAddr //nolint:ifshort + c.mu.RUnlock() + + if addr == currentAddr { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + + if addr == c._masterAddr { + return + } + c._masterAddr = addr + + internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q", + c.opt.MasterName, addr) + if c.onFailover != nil { + c.onFailover(ctx, addr) + } +} + +func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel 
*SentinelClient) { + if c.sentinel != nil { + panic("not reached") + } + c.sentinel = sentinel + c.discoverSentinels(ctx) + + c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done") + go c.listen(c.pubsub) +} + +func (c *sentinelFailover) discoverSentinels(ctx context.Context) { + sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result() + if err != nil { + internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err) + return + } + for _, sentinel := range sentinels { + vals := sentinel.([]interface{}) + var ip, port string + for i := 0; i < len(vals); i += 2 { + key := vals[i].(string) + switch key { + case "ip": + ip = vals[i+1].(string) + case "port": + port = vals[i+1].(string) + } + } + if ip != "" && port != "" { + sentinelAddr := net.JoinHostPort(ip, port) + if !contains(c.sentinelAddrs, sentinelAddr) { + internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q", + sentinelAddr, c.opt.MasterName) + c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr) + } + } + } +} + +func (c *sentinelFailover) listen(pubsub *PubSub) { + ctx := context.TODO() + + if c.onUpdate != nil { + c.onUpdate(ctx) + } + + ch := pubsub.Channel() + for msg := range ch { + if msg.Channel == "+switch-master" { + parts := strings.Split(msg.Payload, " ") + if parts[0] != c.opt.MasterName { + internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0]) + continue + } + addr := net.JoinHostPort(parts[3], parts[4]) + c.trySwitchMaster(pubsub.getContext(), addr) + } + + if c.onUpdate != nil { + c.onUpdate(ctx) + } + } +} + +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} + +//------------------------------------------------------------------------------ + +// NewFailoverClusterClient returns a client that supports routing read-only commands +// to a slave node. +func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient { + sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs)) + copy(sentinelAddrs, failoverOpt.SentinelAddrs) + + failover := &sentinelFailover{ + opt: failoverOpt, + sentinelAddrs: sentinelAddrs, + } + + opt := failoverOpt.clusterOptions() + opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) { + masterAddr, err := failover.MasterAddr(ctx) + if err != nil { + return nil, err + } + + nodes := []ClusterNode{{ + Addr: masterAddr, + }} + + slaveAddrs, err := failover.slaveAddrs(ctx, false) + if err != nil { + return nil, err + } + + for _, slaveAddr := range slaveAddrs { + nodes = append(nodes, ClusterNode{ + Addr: slaveAddr, + }) + } + + slots := []ClusterSlot{ + { + Start: 0, + End: 16383, + Nodes: nodes, + }, + } + return slots, nil + } + + c := NewClusterClient(opt) + + failover.mu.Lock() + failover.onUpdate = func(ctx context.Context) { + c.ReloadState(ctx) + } + failover.mu.Unlock() + + return c +} diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go new file mode 100644 index 0000000000..8c9d87202a --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/tx.go @@ -0,0 +1,149 @@ +package redis + +import ( + "context" + + "github.com/go-redis/redis/v8/internal/pool" + "github.com/go-redis/redis/v8/internal/proto" +) + +// TxFailedErr transaction redis failed. 
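+// It is returned by Exec when EXEC aborts because a watched key was
+// modified by another client. A typical optimistic-locking sketch (rdb
+// and "key" are placeholders):
+//
+//	err := rdb.Watch(ctx, func(tx *redis.Tx) error {
+//		n, err := tx.Get(ctx, "key").Int()
+//		if err != nil && err != redis.Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+//			pipe.Set(ctx, "key", n+1, 0)
+//			return nil
+//		})
+//		return err // redis.TxFailedErr if "key" changed since WATCH
+//	}, "key")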
+const TxFailedErr = proto.RedisError("redis: transaction failed") + +// Tx implements Redis transactions as described in +// http://redis.io/topics/transactions. It's NOT safe for concurrent use +// by multiple goroutines, because Exec resets list of watched keys. +// +// If you don't need WATCH, use Pipeline instead. +type Tx struct { + baseClient + cmdable + statefulCmdable + hooks + ctx context.Context +} + +func (c *Client) newTx(ctx context.Context) *Tx { + tx := Tx{ + baseClient: baseClient{ + opt: c.opt, + connPool: pool.NewStickyConnPool(c.connPool), + }, + hooks: c.hooks.clone(), + ctx: ctx, + } + tx.init() + return &tx +} + +func (c *Tx) init() { + c.cmdable = c.Process + c.statefulCmdable = c.Process +} + +func (c *Tx) Context() context.Context { + return c.ctx +} + +func (c *Tx) WithContext(ctx context.Context) *Tx { + if ctx == nil { + panic("nil context") + } + clone := *c + clone.init() + clone.hooks.lock() + clone.ctx = ctx + return &clone +} + +func (c *Tx) Process(ctx context.Context, cmd Cmder) error { + return c.hooks.process(ctx, cmd, c.baseClient.process) +} + +// Watch prepares a transaction and marks the keys to be watched +// for conditional execution if there are any keys. +// +// The transaction is automatically closed when fn exits. +func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { + tx := c.newTx(ctx) + defer tx.Close(ctx) + if len(keys) > 0 { + if err := tx.Watch(ctx, keys...).Err(); err != nil { + return err + } + } + return fn(tx) +} + +// Close closes the transaction, releasing any open resources. +func (c *Tx) Close(ctx context.Context) error { + _ = c.Unwatch(ctx).Err() + return c.baseClient.Close() +} + +// Watch marks the keys to be watched for conditional execution +// of a transaction. +func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "watch" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStatusCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +// Unwatch flushes all the previously watched keys for a transaction. +func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd { + args := make([]interface{}, 1+len(keys)) + args[0] = "unwatch" + for i, key := range keys { + args[1+i] = key + } + cmd := NewStatusCmd(ctx, args...) + _ = c.Process(ctx, cmd) + return cmd +} + +// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined. +func (c *Tx) Pipeline() Pipeliner { + pipe := Pipeline{ + ctx: c.ctx, + exec: func(ctx context.Context, cmds []Cmder) error { + return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) + }, + } + pipe.init() + return &pipe +} + +// Pipelined executes commands queued in the fn outside of the transaction. +// Use TxPipelined if you need transactional behavior. +func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.Pipeline().Pipelined(ctx, fn) +} + +// TxPipelined executes commands queued in the fn in the transaction. +// +// When using WATCH, EXEC will execute commands only if the watched keys +// were not modified, allowing for a check-and-set mechanism. +// +// Exec always returns list of commands. If transaction fails +// TxFailedErr is returned. Otherwise Exec returns an error of the first +// failed command or nil. +func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) { + return c.TxPipeline().Pipelined(ctx, fn) +} + +// TxPipeline creates a pipeline. 
+// Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
new file mode 100644
index 0000000000..c89b3e5d74
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -0,0 +1,215 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+	// Either a single address or a seed list of host:port addresses
+	// of cluster/sentinel nodes.
+	Addrs []string
+
+	// Database to be selected after connecting to the server.
+	// Only single-node and failover clients.
+	DB int
+
+	// Common options.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username         string
+	Password         string
+	SentinelUsername string
+	SentinelPassword string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+
+	// Only cluster clients.
+
+	MaxRedirects   int
+	ReadOnly       bool
+	RouteByLatency bool
+	RouteRandomly  bool
+
+	// The sentinel master name.
+	// Only failover clients.
+
+	MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:6379"}
+	}
+
+	return &ClusterOptions{
+		Addrs:     o.Addrs,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		Username: o.Username,
+		Password: o.Password,
+
+		MaxRedirects:   o.MaxRedirects,
+		ReadOnly:       o.ReadOnly,
+		RouteByLatency: o.RouteByLatency,
+		RouteRandomly:  o.RouteRandomly,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:        o.DialTimeout,
+		ReadTimeout:        o.ReadTimeout,
+		WriteTimeout:       o.WriteTimeout,
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Failover returns failover options created from the universal options.
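+//
+// For instance (a sketch; the master name and sentinel address are
+// placeholders):
+//
+//	uopts := &redis.UniversalOptions{
+//		MasterName: "mymaster",
+//		Addrs:      []string{"127.0.0.1:26379"},
+//	}
+//	failoverOpts := uopts.Failover() // suitable for NewFailoverClient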
+func (o *UniversalOptions) Failover() *FailoverOptions { + if len(o.Addrs) == 0 { + o.Addrs = []string{"127.0.0.1:26379"} + } + + return &FailoverOptions{ + SentinelAddrs: o.Addrs, + MasterName: o.MasterName, + + Dialer: o.Dialer, + OnConnect: o.OnConnect, + + DB: o.DB, + Username: o.Username, + Password: o.Password, + SentinelUsername: o.SentinelUsername, + SentinelPassword: o.SentinelPassword, + + MaxRetries: o.MaxRetries, + MinRetryBackoff: o.MinRetryBackoff, + MaxRetryBackoff: o.MaxRetryBackoff, + + DialTimeout: o.DialTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MinIdleConns: o.MinIdleConns, + MaxConnAge: o.MaxConnAge, + PoolTimeout: o.PoolTimeout, + IdleTimeout: o.IdleTimeout, + IdleCheckFrequency: o.IdleCheckFrequency, + + TLSConfig: o.TLSConfig, + } +} + +// Simple returns basic options created from the universal options. +func (o *UniversalOptions) Simple() *Options { + addr := "127.0.0.1:6379" + if len(o.Addrs) > 0 { + addr = o.Addrs[0] + } + + return &Options{ + Addr: addr, + Dialer: o.Dialer, + OnConnect: o.OnConnect, + + DB: o.DB, + Username: o.Username, + Password: o.Password, + + MaxRetries: o.MaxRetries, + MinRetryBackoff: o.MinRetryBackoff, + MaxRetryBackoff: o.MaxRetryBackoff, + + DialTimeout: o.DialTimeout, + ReadTimeout: o.ReadTimeout, + WriteTimeout: o.WriteTimeout, + + PoolFIFO: o.PoolFIFO, + PoolSize: o.PoolSize, + MinIdleConns: o.MinIdleConns, + MaxConnAge: o.MaxConnAge, + PoolTimeout: o.PoolTimeout, + IdleTimeout: o.IdleTimeout, + IdleCheckFrequency: o.IdleCheckFrequency, + + TLSConfig: o.TLSConfig, + } +} + +// -------------------------------------------------------------------- + +// UniversalClient is an abstract client which - based on the provided options - +// represents either a ClusterClient, a FailoverClient, or a single-node Client. +// This can be useful for testing cluster-specific applications locally or having different +// clients in different environments. +type UniversalClient interface { + Cmdable + Context() context.Context + AddHook(Hook) + Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error + Do(ctx context.Context, args ...interface{}) *Cmd + Process(ctx context.Context, cmd Cmder) error + Subscribe(ctx context.Context, channels ...string) *PubSub + PSubscribe(ctx context.Context, channels ...string) *PubSub + Close() error + PoolStats() *PoolStats +} + +var ( + _ UniversalClient = (*Client)(nil) + _ UniversalClient = (*ClusterClient)(nil) + _ UniversalClient = (*Ring)(nil) +) + +// NewUniversalClient returns a new multi client. The type of the returned client depends +// on the following conditions: +// +// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned. +// 2. if the number of Addrs is two or more, a ClusterClient is returned. +// 3. Otherwise, a single-node Client is returned. +func NewUniversalClient(opts *UniversalOptions) UniversalClient { + if opts.MasterName != "" { + return NewFailoverClient(opts.Failover()) + } else if len(opts.Addrs) > 1 { + return NewClusterClient(opts.Cluster()) + } + return NewClient(opts.Simple()) +} diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go new file mode 100644 index 0000000000..112c9a2da0 --- /dev/null +++ b/vendor/github.com/go-redis/redis/v8/version.go @@ -0,0 +1,6 @@ +package redis + +// Version is the current release version. 
+func Version() string {
+	return "8.11.5"
+}
diff --git a/vendor/github.com/go-redis/redis/v8/ya.make b/vendor/github.com/go-redis/redis/v8/ya.make
new file mode 100644
index 0000000000..80ccb486c6
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/ya.make
@@ -0,0 +1,58 @@
+GO_LIBRARY()
+
+LICENSE(BSD-2-Clause)
+
+SRCS(
+    cluster.go
+    cluster_commands.go
+    command.go
+    commands.go
+    doc.go
+    error.go
+    iterator.go
+    options.go
+    pipeline.go
+    pubsub.go
+    redis.go
+    result.go
+    ring.go
+    script.go
+    sentinel.go
+    tx.go
+    universal.go
+    version.go
+)
+
+GO_TEST_SRCS(
+    bench_decode_test.go
+    export_test.go
+    internal_test.go
+    options_test.go
+)
+
+GO_XTEST_SRCS(
+    bench_test.go
+    cluster_test.go
+    command_test.go
+    commands_test.go
+    example_instrumentation_test.go
+    example_test.go
+    iterator_test.go
+    main_test.go
+    pipeline_test.go
+    pool_test.go
+    pubsub_test.go
+    race_test.go
+    redis_test.go
+    ring_test.go
+    sentinel_test.go
+    tx_test.go
+    universal_test.go
+)
+
+END()
+
+RECURSE(
+    # gotest
+    internal
+)
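A minimal end-to-end sketch tying the vendored pieces above together (the
address, key name, and Lua script are illustrative placeholders, not part
of the diff):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// With no MasterName and a single seed address, NewUniversalClient
	// falls through to a plain single-node *redis.Client.
	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"127.0.0.1:6379"},
	})
	defer rdb.Close()

	// Script.Run tries EVALSHA first and falls back to EVAL when the
	// server replies with NOSCRIPT.
	incrBy := redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 2).Int64()
	fmt.Println(n, err)
}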