Mirror of https://github.com/bufanyun/hotgo.git, synced 2025-11-12 04:03:44 +08:00
Release v2.2.10. For the full list of changes, see: https://github.com/bufanyun/hotgo/tree/v2.0/docs/guide-zh-CN/addon-version-upgrade.md
server/internal/library/queue/disk.go (new file, 137 lines)
@@ -0,0 +1,137 @@
package queue

import (
    "encoding/json"
    "fmt"
    "github.com/gogf/gf/v2/errors/gerror"
    "github.com/gogf/gf/v2/frame/g"
    "github.com/gogf/gf/v2/os/gfile"
    "hotgo/internal/library/queue/disk"
    "sync"
    "time"
)

type DiskProducerMq struct {
    config    *disk.Config
    producers map[string]*disk.Queue
    sync.Mutex
}

type DiskConsumerMq struct {
    config *disk.Config
}

func RegisterDiskMqConsumer(config *disk.Config) (client MqConsumer, err error) {
    return &DiskConsumerMq{
        config: config,
    }, nil
}

// ListenReceiveMsgDo consumes messages for the given topic.
func (q *DiskConsumerMq) ListenReceiveMsgDo(topic string, receiveDo func(mqMsg MqMsg)) (err error) {
    if topic == "" {
        return gerror.New("disk.ListenReceiveMsgDo topic is empty")
    }

    queue := NewDiskQueue(topic, q.config)
    if queue == nil {
        return gerror.New("disk.ListenReceiveMsgDo failed to create the disk queue, see the error log for details")
    }

    sleep := time.Second
    go func() {
        for {
            if index, offset, data, err := queue.Read(); err == nil {
                var mqMsg MqMsg
                if err = json.Unmarshal(data, &mqMsg); err != nil {
                    g.Log().Warningf(ctx, "disk.ListenReceiveMsgDo Unmarshal err:%+v, topic:%v, data:%+v .", err, topic, string(data))
                    continue
                }
                if mqMsg.MsgId != "" {
                    receiveDo(mqMsg)
                    queue.Commit(index, offset)
                    // data is flowing, poll quickly
                    sleep = time.Millisecond * 1
                }
            } else {
                // queue is empty or unreadable, back off
                sleep = time.Second
            }

            time.Sleep(sleep)
        }
    }()

    // block forever; consumption happens in the goroutine above
    select {}
}

func RegisterDiskMqProducer(config *disk.Config) (client MqProducer, err error) {
    return &DiskProducerMq{
        config:    config,
        producers: make(map[string]*disk.Queue),
    }, nil
}

// SendMsg produces a message from a string body.
func (d *DiskProducerMq) SendMsg(topic string, body string) (mqMsg MqMsg, err error) {
    return d.SendByteMsg(topic, []byte(body))
}

// SendByteMsg produces a message from a byte slice body.
func (d *DiskProducerMq) SendByteMsg(topic string, body []byte) (mqMsg MqMsg, err error) {
    if topic == "" {
        return mqMsg, gerror.New("DiskMq topic is empty")
    }

    mqMsg = MqMsg{
        RunType:   SendMsg,
        Topic:     topic,
        MsgId:     getRandMsgId(),
        Body:      body,
        Timestamp: time.Now(),
    }

    mqMsgJson, err := json.Marshal(mqMsg)
    if err != nil {
        return mqMsg, gerror.New(fmt.Sprint("queue disk producer failed to marshal the message to json: ", err))
    }

    queue := d.getProducer(topic)
    if queue == nil {
        return mqMsg, gerror.New("queue disk producer failed to create the disk queue")
    }
    if err = queue.Write(mqMsgJson); err != nil {
        return mqMsg, gerror.New(fmt.Sprint("queue disk producer failed to write the message: ", err))
    }
    return
}

func (d *DiskProducerMq) getProducer(topic string) *disk.Queue {
    // lock before the map lookup so concurrent callers cannot race on d.producers
    d.Lock()
    defer d.Unlock()

    if queue, ok := d.producers[topic]; ok {
        return queue
    }

    queue := NewDiskQueue(topic, d.config)
    d.producers[topic] = queue
    return queue
}

func NewDiskQueue(topic string, config *disk.Config) *disk.Queue {
    conf := &disk.Config{
        Path:         fmt.Sprintf("%s/%s/%s", config.Path, config.GroupName, topic),
        BatchSize:    config.BatchSize,
        BatchTime:    config.BatchTime * time.Second, // the configured value is a plain number of seconds
        SegmentSize:  config.SegmentSize,
        SegmentLimit: config.SegmentLimit,
    }

    if !gfile.Exists(conf.Path) {
        if err := gfile.Mkdir(conf.Path); err != nil {
            g.Log().Errorf(ctx, "NewDiskQueue failed to create the cache directory, err:%+v", err)
            return nil
        }
    }

    queue, err := disk.New(conf)
    if err != nil {
        g.Log().Errorf(ctx, "NewDiskQueue err:%v", err)
        return nil
    }
    return queue
}
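To make the pieces above concrete, here is a minimal, hypothetical usage sketch. It assumes it runs inside the hotgo module (the queue package is internal), and every path, topic and config value is an illustrative placeholder, not something this commit ships:

package main

import (
    "fmt"

    "hotgo/internal/library/queue"
    "hotgo/internal/library/queue/disk"
)

func main() {
    // Placeholder values; real deployments scan these from the "queue" config section.
    cfg := &disk.Config{
        GroupName:    "default",
        Path:         "./storage/diskqueue",
        BatchSize:    100,
        BatchTime:    5, // plain seconds; NewDiskQueue multiplies by time.Second
        SegmentSize:  64 << 20,
        SegmentLimit: 50,
    }

    producer, err := queue.RegisterDiskMqProducer(cfg)
    if err != nil {
        panic(err)
    }
    if _, err = producer.SendMsg("hypothetical-topic", `{"hello":"world"}`); err != nil {
        panic(err)
    }

    consumer, _ := queue.RegisterDiskMqConsumer(cfg)
    // Blocks forever: ListenReceiveMsgDo ends in select {}.
    _ = consumer.ListenReceiveMsgDo("hypothetical-topic", func(msg queue.MqMsg) {
        fmt.Printf("received msgId:%v body:%v\n", msg.MsgId, string(msg.Body))
    })
}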
server/internal/library/queue/disk/disk.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package disk

import (
    "context"
    "errors"
    "io"
    "os"
    "sync"
    "time"
)

const (
    filePerm  = 0600     // permission bits for data files
    indexFile = ".index" // checkpoint file storing the read index and offset
)

type Config struct {
    GroupName    string        // consumer group name
    Path         string        // data storage path
    BatchSize    int64         // sync after every N messages; syncing happens when either BatchSize or BatchTime is reached
    BatchTime    time.Duration // sync every N seconds
    SegmentSize  int64         // maximum size in bytes of each topic segment file
    SegmentLimit int64         // maximum number of segment files per topic
}

type Queue struct {
    sync.RWMutex
    close  bool
    ticker *time.Ticker
    wg     *sync.WaitGroup
    ctx    context.Context
    cancel context.CancelFunc
    writer *writer
    reader *reader
}

func New(config *Config) (queue *Queue, err error) {
    if _, err = os.Stat(config.Path); err != nil {
        return
    }
    queue = &Queue{close: false, wg: &sync.WaitGroup{}, writer: &writer{config: config}, reader: &reader{config: config}}
    queue.ticker = time.NewTicker(config.BatchTime)
    queue.ctx, queue.cancel = context.WithCancel(context.TODO())
    err = queue.reader.restore()
    if err != nil {
        return
    }
    go queue.sync()
    return
}

// Write appends data to the queue.
func (q *Queue) Write(data []byte) error {
    if q.close {
        return errors.New("closed")
    }

    q.Lock()
    defer q.Unlock()

    return q.writer.write(data)
}

// Read returns the next segment index, offset and payload.
func (q *Queue) Read() (int64, int64, []byte, error) {
    if q.close {
        return 0, 0, nil, errors.New("closed")
    }

    q.RLock()
    defer q.RUnlock()

    index, offset, data, err := q.reader.read()
    if err == io.EOF && (q.writer.file == nil || q.reader.file.Name() != q.writer.file.Name()) {
        _ = q.reader.safeRotate()
    }
    return index, offset, data, err
}

// Commit persists the consumer checkpoint (index and offset).
func (q *Queue) Commit(index int64, offset int64) {
    if q.close {
        return
    }

    ck := &q.reader.checkpoint
    ck.Index, ck.Offset = index, offset
    q.reader.sync()
}

// Close shuts the Queue down.
func (q *Queue) Close() {
    if q.close {
        return
    }

    q.close = true
    q.cancel()
    q.wg.Wait()
    q.writer.close()
    q.reader.close()
}

// sync flushes buffered writes on every ticker interval until the context is cancelled.
func (q *Queue) sync() {
    q.wg.Add(1)
    defer q.wg.Done()
    for {
        select {
        case <-q.ticker.C:
            q.Lock()
            q.writer.sync()
            q.Unlock()
        case <-q.ctx.Done():
            return
        }
    }
}
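The Queue type can also be driven directly. Below is a minimal sketch, under the assumption that the target directory already exists (disk.New stats the path and fails otherwise) and with BatchSize set to 1 so each Write is flushed immediately; the path is a placeholder:

package main

import (
    "fmt"
    "time"

    "hotgo/internal/library/queue/disk"
)

func main() {
    q, err := disk.New(&disk.Config{
        Path:         "/tmp/diskqueue-demo", // placeholder, create beforehand
        BatchSize:    1, // flush after every write so the Read below sees the data
        BatchTime:    time.Second,
        SegmentSize:  1 << 20,
        SegmentLimit: 10,
    })
    if err != nil {
        panic(err)
    }
    defer q.Close()

    if err = q.Write([]byte(`{"msgId":"demo"}`)); err != nil {
        panic(err)
    }

    if index, offset, data, err := q.Read(); err == nil {
        // Commit only after handling succeeds; an uncommitted message is
        // re-delivered after a restart because the checkpoint lags behind.
        q.Commit(index, offset)
        fmt.Println(string(data))
    }
}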
server/internal/library/queue/disk/reader.go (new file, 176 lines)
@@ -0,0 +1,176 @@
package disk

import (
    "bufio"
    "bytes"
    "encoding/json"
    "errors"
    "fmt"
    "os"
    "path"
    "path/filepath"
    "sort"
    "strconv"
)

var errorQueueEmpty = errors.New("queue is empty")

type reader struct {
    file       *os.File
    index      int64
    offset     int64
    reader     *bufio.Reader
    checkpoint checkpoint
    config     *Config
}

type checkpoint struct {
    Index  int64 `json:"index"`
    Offset int64 `json:"offset"`
}

// read returns the current segment index, the advanced offset and one message.
func (r *reader) read() (int64, int64, []byte, error) {
    if err := r.check(); err != nil {
        return r.index, r.offset, nil, err
    }

    // read a single line; messages are newline-delimited
    data, err := r.reader.ReadBytes('\n')
    if err != nil {
        return r.index, r.offset, nil, err
    }
    data = bytes.TrimRight(data, "\n")

    // r.offset now points just past this message
    r.offset += int64(len(data)) + 1
    return r.index, r.offset, data, err
}

// check opens the next segment if none is currently open.
func (r *reader) check() error {
    if r.file != nil {
        return nil
    }

    file, err := r.next()
    if err != nil {
        return err
    }

    return r.open(file)
}

func (r *reader) open(file string) (err error) {
    if r.file, err = os.OpenFile(file, os.O_RDONLY, filePerm); err != nil {
        return err
    }

    // derive the segment index from the file name
    r.index = r.getIndex(file)

    // seek to the committed read offset
    if _, err = r.file.Seek(r.offset, 0); err != nil {
        return err
    }

    r.reader = bufio.NewReader(r.file)
    return nil
}

// safeRotate advances to the next segment only if one exists.
func (r *reader) safeRotate() error {
    // if there is no next segment, keep the current one open
    if _, err := r.next(); err == errorQueueEmpty {
        return nil
    }

    return r.rotate()
}

// rotate closes the current segment and resets the read state.
func (r *reader) rotate() error {
    if r.file == nil {
        return nil
    }

    // close the segment
    _ = r.file.Close()
    r.file, r.offset, r.reader = nil, 0, nil
    return nil
}

// close the reader and reset all state.
func (r *reader) close() {
    if r.file == nil {
        return
    }

    if err := r.file.Close(); err != nil {
        return
    }

    r.file, r.reader, r.index, r.offset = nil, nil, 0, 0
}

// sync persists the checkpoint (index and offset) to the index file.
func (r *reader) sync() {
    name := path.Join(r.config.Path, indexFile)
    data, _ := json.Marshal(&r.checkpoint)
    _ = os.WriteFile(name, data, filePerm)
}

// restore loads the checkpoint (index and offset) from the index file.
func (r *reader) restore() (err error) {
    name := path.Join(r.config.Path, indexFile)

    // initialize the index file on first use
    if _, err1 := os.Stat(name); err1 != nil {
        r.sync()
    }

    data, _ := os.ReadFile(name)

    _ = json.Unmarshal(data, &r.checkpoint)
    r.index, r.offset = r.checkpoint.Index, r.checkpoint.Offset
    if r.index == 0 {
        return
    }

    if err = r.open(fmt.Sprintf("%s/%d.data", r.config.Path, r.index)); err != nil {
        r.offset = 0
    }
    return
}

// next returns the path of the next readable segment.
func (r *reader) next() (string, error) {
    files, err := filepath.Glob(filepath.Join(r.config.Path, "*.data"))
    if err != nil {
        return "", err
    }
    sort.Strings(files)

    for _, file := range files {
        index := r.getIndex(file)
        if index < r.checkpoint.Index {
            _ = os.Remove(file) // remove fully consumed (expired) segments
        }

        if index > r.index {
            return file, nil
        }
    }

    return "", errorQueueEmpty
}

// getIndex extracts the segment index from a file name such as 0001700000000000.data.
func (r *reader) getIndex(filename string) int64 {
    base := filepath.Base(filename)
    name := base[0 : len(base)-len(path.Ext(filename))]
    index, _ := strconv.ParseInt(name, 10, 64)
    return index
}
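Reader, checkpoint and writer together imply a per-topic directory layout roughly like the following sketch (all names and numbers are illustrative; segment files are named by the millisecond timestamp the writer assigns):

storage/diskqueue/default/my-topic/
    .index               {"index":1700000000000,"offset":128}  <- committed checkpoint
    1700000000000.data   fully consumed; removed by next() once its index < checkpoint.Index
    1700000123456.data   current segment of newline-delimited JSON messages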
server/internal/library/queue/disk/writer.go (new file, 102 lines)
@@ -0,0 +1,102 @@
package disk

import (
    "bufio"
    "errors"
    "fmt"
    "os"
    "path"
    "path/filepath"
    "time"
)

type writer struct {
    file   *os.File
    size   int64
    count  int64
    writer *bufio.Writer
    config *Config
}

// write appends one newline-delimited record to the current segment.
func (w *writer) write(data []byte) error {
    // append the newline delimiter
    data = append(data, "\n"...)
    size := int64(len(data))

    // close the current segment so the write below rotates to a new one
    if w.size+size > w.config.SegmentSize {
        w.close()
    }

    // create a new segment
    if w.file == nil {
        if err := w.open(); err != nil {
            return err
        }
    }

    // write to the buffer
    if _, err := w.writer.Write(data); err != nil {
        return err
    }

    w.size += size

    // sync to disk once BatchSize writes have accumulated
    w.count++
    if w.count >= w.config.BatchSize {
        w.sync()
    }

    return nil
}

// open creates a new segment named by the current millisecond timestamp.
func (w *writer) open() (err error) {
    if w.segmentNum() >= w.config.SegmentLimit {
        return errors.New("segment num exceeds the limit")
    }

    name := path.Join(w.config.Path, fmt.Sprintf("%013d.data", time.Now().UnixNano()/1e6))
    if w.file, err = os.OpenFile(name, os.O_CREATE|os.O_WRONLY, filePerm); err != nil {
        return err
    }

    w.size = 0
    // size the buffer to a whole segment so flushing only happens via sync()
    w.writer = bufio.NewWriterSize(w.file, int(w.config.SegmentSize))
    return nil
}

// sync flushes buffered data to disk.
func (w *writer) sync() {
    if w.writer == nil {
        return
    }

    if err := w.writer.Flush(); err == nil {
        w.count = 0
    }
}

// close flushes and closes the current segment.
func (w *writer) close() {
    if w.file == nil {
        return
    }

    w.sync()
    if err := w.file.Close(); err != nil {
        return
    }

    w.size, w.file, w.writer = 0, nil, nil
}

// segmentNum returns the number of segment files in the data path.
func (w *writer) segmentNum() int64 {
    segments, _ := filepath.Glob(path.Join(w.config.Path, "*.data"))
    return int64(len(segments))
}
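Segment rotation can be observed with a deliberately tiny SegmentSize. The sketch below is hypothetical (the directory is a placeholder and must pre-exist) and sleeps between writes because segment names only have millisecond resolution:

package main

import (
    "time"

    "hotgo/internal/library/queue/disk"
)

func main() {
    q, err := disk.New(&disk.Config{
        Path:         "/tmp/diskqueue-rotate", // placeholder, create beforehand
        BatchSize:    1,
        BatchTime:    time.Second,
        SegmentSize:  32, // tiny on purpose: the second write exceeds it and rotates
        SegmentLimit: 4,
    })
    if err != nil {
        panic(err)
    }
    defer q.Close()

    _ = q.Write([]byte("first message, larger than the 32-byte segment"))
    time.Sleep(2 * time.Millisecond) // avoid two segments sharing one millisecond name
    _ = q.Write([]byte("second message, triggers close() and a fresh open()"))
    // The directory now holds two *.data segment files.
}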
@@ -1,15 +1,15 @@
 // Package queue
 // @Link  https://github.com/bufanyun/hotgo
-// @Copyright Copyright (c) 2022 HotGo CLI
+// @Copyright Copyright (c) 2023 HotGo CLI
 // @Author  Ms <133814250@qq.com>
 // @License  https://github.com/bufanyun/hotgo/blob/master/LICENSE
 //
 package queue

 import (
 	"github.com/gogf/gf/v2/errors/gerror"
 	"github.com/gogf/gf/v2/frame/g"
 	"github.com/gogf/gf/v2/os/gctx"
+	"hotgo/internal/library/queue/disk"
 	"hotgo/utility/charset"
 	"sync"
 	"time"
@@ -31,21 +31,21 @@ const (
 )

 type Config struct {
-	Switch        bool   `json:"switch"`
-	Driver        string `json:"driver"`
-	Retry         int    `json:"retry"`
-	MultiComsumer bool   `json:"multiComsumer"`
-	GroupName     string `json:"groupName"`
-	Redis         RedisConf
-	Rocketmq      RocketmqConf
-	Kafka         KafkaConf
+	Switch    bool   `json:"switch"`
+	Driver    string `json:"driver"`
+	Retry     int    `json:"retry"`
+	GroupName string `json:"groupName"`
+	Redis     RedisConf
+	Rocketmq  RocketmqConf
+	Kafka     KafkaConf
+	Disk      *disk.Config
 }

 type RedisConf struct {
-	Address string `json:"address"`
-	Db      int    `json:"db"`
-	Pass    string `json:"pass"`
-	Timeout int    `json:"timeout"`
+	Address     string `json:"address"`
+	Db          int    `json:"db"`
+	Pass        string `json:"pass"`
+	IdleTimeout int    `json:"idleTimeout"`
 }
 type RocketmqConf struct {
 	Address []string `json:"address"`
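For orientation, the new fields could be populated like the hedged sketch below. Every value is illustrative; in HotGo the struct is normally filled by g.Cfg().MustGet(ctx, "queue").Scan(&config) rather than by hand:

// Hypothetical, hand-rolled equivalent of the scanned "queue" config section,
// written from inside package queue.
config = Config{
    Switch:    true,
    Driver:    "disk", // or "redis", "rocketmq", "kafka"
    Retry:     3,
    GroupName: "default",
    Disk: &disk.Config{
        Path:         "./storage/diskqueue",
        BatchSize:    100,
        BatchTime:    5,        // plain seconds; NewDiskQueue multiplies by time.Second
        SegmentSize:  64 << 20, // 64 MiB per segment file
        SegmentLimit: 50,
    },
}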
@@ -53,9 +53,10 @@ type RocketmqConf struct {
 }

 type KafkaConf struct {
-	Address    []string `json:"address"`
-	Version    string   `json:"version"`
-	RandClient bool     `json:"randClient"`
+	Address       []string `json:"address"`
+	Version       string   `json:"version"`
+	RandClient    bool     `json:"randClient"`
+	MultiConsumer bool     `json:"multiConsumer"`
 }

 type MqMsg struct {
@@ -80,7 +81,7 @@ func init() {
 	mqProducerInstanceMap = make(map[string]MqProducer)
 	mqConsumerInstanceMap = make(map[string]MqConsumer)
 	if err := g.Cfg().MustGet(ctx, "queue").Scan(&config); err != nil {
-		g.Log().Infof(ctx, "queue init err:%+v", err)
+		g.Log().Warningf(ctx, "queue init err:%+v", err)
 	}
 }
@@ -132,11 +133,13 @@ func NewProducer(groupName string) (mqClient MqProducer, err error) {
 			Addr:    config.Redis.Address,
 			Passwd:  config.Redis.Pass,
 			DBnum:   config.Redis.Db,
-			Timeout: config.Redis.Timeout,
+			Timeout: config.Redis.IdleTimeout,
 		}, PoolOption{
 			5, 50, 5,
 		}, groupName, config.Retry)
+	case "disk":
+		config.Disk.GroupName = groupName
+		mqClient, err = RegisterDiskMqProducer(config.Disk)
 	default:
 		err = gerror.New("queue driver is not support")
 	}
@@ -154,17 +157,6 @@ func NewProducer(groupName string) (mqClient MqProducer, err error) {

 // NewConsumer initializes a consumer instance
 func NewConsumer(groupName string) (mqClient MqConsumer, err error) {
-	randTag := string(charset.RandomCreateBytes(6))
-
-	// whether creating multiple consumers is supported
-	if config.MultiComsumer == false {
-		randTag = "001"
-	}
-
-	if item, ok := mqConsumerInstanceMap[groupName+"-"+randTag]; ok {
-		return item, nil
-	}
-
 	if groupName == "" {
 		err = gerror.New("mq groupName is empty.")
 		return
@@ -183,6 +175,16 @@ func NewConsumer(groupName string) (mqClient MqConsumer, err error) {
 		return
 	}

+	randTag := string(charset.RandomCreateBytes(6))
+	// whether creating multiple consumers is supported
+	if config.Kafka.MultiConsumer == false {
+		randTag = "001"
+	}
+
+	if item, ok := mqConsumerInstanceMap[groupName+"-"+randTag]; ok {
+		return item, nil
+	}
+
 	clientId := "HOTGO-Consumer-" + groupName
 	if config.Kafka.RandClient {
 		clientId += "-" + randTag
@@ -204,10 +206,13 @@ func NewConsumer(groupName string) (mqClient MqConsumer, err error) {
 			Addr:    config.Redis.Address,
 			Passwd:  config.Redis.Pass,
 			DBnum:   config.Redis.Db,
-			Timeout: config.Redis.Timeout,
+			Timeout: config.Redis.IdleTimeout,
 		}, PoolOption{
 			5, 50, 5,
 		}, groupName)
+	case "disk":
+		config.Disk.GroupName = groupName
+		mqClient, err = RegisterDiskMqConsumer(config.Disk)
 	default:
 		err = gerror.New("queue driver is not support")
 	}
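With both case "disk" branches in place, callers keep using the generic factories and select the driver purely through configuration. A hypothetical sketch, written from inside package queue; group and topic names are placeholders:

// Assumes config.Driver == "disk" and config.Disk is populated.
producer, err := NewProducer("default")
if err != nil {
    panic(err)
}
if _, err = producer.SendMsg("login_log", `{"user_id":1}`); err != nil {
    panic(err)
}

consumer, err := NewConsumer("default")
if err != nil {
    panic(err)
}
// Handlers run in a background goroutine; ListenReceiveMsgDo itself blocks.
_ = consumer.ListenReceiveMsgDo("login_log", func(msg MqMsg) {
    // handle the message, e.g. persist a login record
})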
@@ -1,6 +1,6 @@
|
||||
// Package queue
|
||||
// @Link https://github.com/bufanyun/hotgo
|
||||
// @Copyright Copyright (c) 2022 HotGo CLI
|
||||
// @Copyright Copyright (c) 2023 HotGo CLI
|
||||
// @Author Ms <133814250@qq.com>
|
||||
// @License https://github.com/bufanyun/hotgo/blob/master/LICENSE
|
||||
//
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package queue
|
||||
// @Link https://github.com/bufanyun/hotgo
|
||||
// @Copyright Copyright (c) 2022 HotGo CLI
|
||||
// @Copyright Copyright (c) 2023 HotGo CLI
|
||||
// @Author Ms <133814250@qq.com>
|
||||
// @License https://github.com/bufanyun/hotgo/blob/master/LICENSE
|
||||
//
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package queue
|
||||
// @Link https://github.com/bufanyun/hotgo
|
||||
// @Copyright Copyright (c) 2022 HotGo CLI
|
||||
// @Copyright Copyright (c) 2023 HotGo CLI
|
||||
// @Author Ms <133814250@qq.com>
|
||||
// @License https://github.com/bufanyun/hotgo/blob/master/LICENSE
|
||||
//
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
// Package queue
|
||||
// @Link https://github.com/bufanyun/hotgo
|
||||
// @Copyright Copyright (c) 2022 HotGo CLI
|
||||
// @Copyright Copyright (c) 2023 HotGo CLI
|
||||
// @Author Ms <133814250@qq.com>
|
||||
// @License https://github.com/bufanyun/hotgo/blob/master/LICENSE
|
||||
//
|
||||
|
||||