This commit is contained in:
孟帅
2023-02-23 17:53:04 +08:00
parent 7cf1b8ce8e
commit 61d0988d2c
402 changed files with 18340 additions and 35547 deletions

View File

@@ -0,0 +1,118 @@
package disk
import (
"context"
"errors"
"io"
"os"
"sync"
"time"
)
const (
	filePerm  = 0600     // permission bits used when creating data and index files
	indexFile = ".index" // name of the message index (checkpoint) file
)
// Config holds the settings for a disk-backed queue.
type Config struct {
	GroupName    string        // consumer group name
	Path         string        // directory where segment data files are stored
	BatchSize    int64         // sync to disk every N messages; whichever of BatchSize/BatchTime fires first triggers a sync
	BatchTime    time.Duration // sync to disk every N duration
	SegmentSize  int64         // maximum size in bytes of each topic segment data file
	SegmentLimit int64         // maximum number of segment data files per topic
}
// Queue is a disk-backed FIFO message queue. A background goroutine
// (see sync) periodically flushes buffered writes to disk.
type Queue struct {
	sync.RWMutex
	// NOTE(review): close is read in Write/Read without holding the
	// embedded lock — data race with Close; confirm intent
	close  bool
	ticker *time.Ticker       // drives the periodic writer flush (BatchTime)
	wg     *sync.WaitGroup    // lets Close wait for the sync goroutine
	ctx    context.Context    // cancelled by Close to stop the sync goroutine
	cancel context.CancelFunc
	writer *writer // appends messages to segment files
	reader *reader // reads messages and tracks the commit checkpoint
}
// New creates a Queue rooted at config.Path. The directory must already
// exist. It restores the reader checkpoint from the index file and starts
// a background goroutine that flushes writes every config.BatchTime.
func New(config *Config) (*Queue, error) {
	// the data directory must exist up front
	if _, err := os.Stat(config.Path); err != nil {
		return nil, err
	}
	queue := &Queue{
		close:  false,
		wg:     &sync.WaitGroup{},
		writer: &writer{config: config},
		reader: &reader{config: config},
	}
	queue.ticker = time.NewTicker(config.BatchTime)
	queue.ctx, queue.cancel = context.WithCancel(context.TODO())
	if err := queue.reader.restore(); err != nil {
		// release what was acquired above; previously the ticker kept
		// running and the cancel func was never called on this path
		queue.ticker.Stop()
		queue.cancel()
		return nil, err
	}
	go queue.sync()
	return queue, nil
}
// Write data
func (q *Queue) Write(data []byte) error {
if q.close {
return errors.New("closed")
}
q.Lock()
defer q.Unlock()
return q.writer.write(data)
}
// Read data
func (q *Queue) Read() (int64, int64, []byte, error) {
if q.close {
return 0, 0, nil, errors.New("closed")
}
q.RLock()
defer q.RUnlock()
index, offset, data, err := q.reader.read()
if err == io.EOF && (q.writer.file == nil || q.reader.file.Name() != q.writer.file.Name()) {
_ = q.reader.safeRotate()
}
return index, offset, data, err
}
// Commit index and offset
func (q *Queue) Commit(index int64, offset int64) {
if q.close {
return
}
ck := &q.reader.checkpoint
ck.Index, ck.Offset = index, offset
q.reader.sync()
}
// Close Queue
func (q *Queue) Close() {
if q.close {
return
}
q.close = true
q.cancel()
q.wg.Wait()
q.writer.close()
q.reader.close()
}
// sync data
func (q *Queue) sync() {
q.wg.Add(1)
defer q.wg.Done()
for {
select {
case <-q.ticker.C:
q.Lock()
q.writer.sync()
q.Unlock()
case <-q.ctx.Done():
return
}
}
}

View File

@@ -0,0 +1,176 @@
package disk
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strconv"
)
var (
	// errorQueueEmpty is returned by reader.next when no segment newer
	// than the current one exists on disk.
	errorQueueEmpty = errors.New("queue is empty")
)
// reader consumes messages from segment data files, tracking the current
// segment index and byte offset, and persists the committed position via
// the checkpoint file.
type reader struct {
	file       *os.File      // currently open segment file (nil when none)
	index      int64         // numeric index of the current segment (from its file name)
	offset     int64         // byte offset of the next read within the segment
	reader     *bufio.Reader // buffered reader over file
	checkpoint checkpoint    // last committed position
	config     *Config
}
// checkpoint is the committed consumer position persisted as JSON in the
// index file.
type checkpoint struct {
	Index  int64 `json:"index"`  // segment index
	Offset int64 `json:"offset"` // byte offset within that segment
}
// read data
func (r *reader) read() (int64, int64, []byte, error) {
if err := r.check(); err != nil {
return r.index, r.offset, nil, err
}
// read a line
data, err := r.reader.ReadBytes('\n')
if err != nil {
return r.index, r.offset, nil, err
}
data = bytes.TrimRight(data, "\n")
r.offset += int64(len(data)) + 1
return r.index, r.offset, data, err
}
// check a new segment
func (r *reader) check() error {
if r.file != nil {
return nil
}
file, err := r.next()
if err != nil {
return err
}
return r.open(file)
}
// open opens the given segment read-only, derives the segment index from
// its file name, and seeks to the previously recorded offset.
func (r *reader) open(file string) (err error) {
	r.file, err = os.OpenFile(file, os.O_RDONLY, filePerm)
	if err != nil {
		return err
	}
	r.index = r.getIndex(file)
	// 0 == io.SeekStart: the offset is relative to the file's beginning
	if _, err = r.file.Seek(r.offset, 0); err != nil {
		return err
	}
	r.reader = bufio.NewReader(r.file)
	return nil
}
// safeRotate to next segment
func (r *reader) safeRotate() error {
// if there is no next file, it is not cleared
if _, err := r.next(); err == errorQueueEmpty {
return nil
}
return r.rotate()
}
// rotate to next segment
func (r *reader) rotate() error {
if r.file == nil {
return nil
}
// close segment
_ = r.file.Close()
r.file, r.offset, r.reader = nil, 0, nil
return nil
}
// close reader
func (r *reader) close() {
if r.file == nil {
return
}
if err := r.file.Close(); err != nil {
return
}
r.file, r.reader, r.index, r.offset = nil, nil, 0, 0
}
// sync index and offset
func (r *reader) sync() {
name := path.Join(r.config.Path, indexFile)
data, _ := json.Marshal(&r.checkpoint)
_ = ioutil.WriteFile(name, data, filePerm)
}
// restore index and offset
func (r *reader) restore() (err error) {
name := path.Join(r.config.Path, indexFile)
// uninitialized
if _, err1 := os.Stat(name); err1 != nil {
r.sync()
}
data, _ := ioutil.ReadFile(name)
_ = json.Unmarshal(data, &r.checkpoint)
r.index, r.offset = r.checkpoint.Index, r.checkpoint.Offset
if r.index == 0 {
return
}
if err = r.open(fmt.Sprintf("%s/%d.data", r.config.Path, r.index)); err != nil {
r.offset = 0
}
return
}
// next segment
func (r *reader) next() (string, error) {
files, err := filepath.Glob(filepath.Join(r.config.Path, "*.data"))
if err != nil {
return "", err
}
sort.Strings(files)
for _, file := range files {
index := r.getIndex(file)
if index < r.checkpoint.Index {
_ = os.Remove(file) // remove expired segment
}
if index > r.index {
return file, nil
}
}
return "", errorQueueEmpty
}
// get segment index
func (r *reader) getIndex(filename string) int64 {
base := filepath.Base(filename)
name := base[0 : len(base)-len(path.Ext(filename))]
index, _ := strconv.ParseInt(name, 10, 64)
return index
}

View File

@@ -0,0 +1,102 @@
package disk
import (
"bufio"
"errors"
"fmt"
"os"
"path"
"path/filepath"
"time"
)
// writer appends messages to segment data files, buffering writes in
// memory until sync flushes them to disk.
type writer struct {
	file   *os.File      // currently open segment file (nil when none)
	size   int64         // bytes written to the current segment (delimiters included)
	count  int64         // messages written since the last flush
	writer *bufio.Writer // buffer sized to SegmentSize so flushing is explicit
	config *Config
}
// write data
func (w *writer) write(data []byte) error {
// append newline
data = append(data, "\n"...)
size := int64(len(data))
// close current segment for rotate
if w.size+size > w.config.SegmentSize {
w.close()
}
// create a new segment
if w.file == nil {
if err := w.open(); err != nil {
return err
}
}
// write to buffer
if _, err := w.writer.Write(data); err != nil {
return err
}
w.size += size
// sync data to disk
w.count++
if w.count >= w.config.BatchSize {
w.sync()
}
return nil
}
// create a new segment
func (w *writer) open() (err error) {
if w.segmentNum() >= w.config.SegmentLimit {
return errors.New("segment num exceeds the limit")
}
name := path.Join(w.config.Path, fmt.Sprintf("%013d.data", time.Now().UnixNano()/1e6))
if w.file, err = os.OpenFile(name, os.O_CREATE|os.O_WRONLY, filePerm); err != nil {
return err
}
w.size = 0
// disable auto flush
w.writer = bufio.NewWriterSize(w.file, int(w.config.SegmentSize))
w.writer.Reset(w.file)
return err
}
// sync data to disk
func (w *writer) sync() {
if w.writer == nil {
return
}
if err := w.writer.Flush(); err == nil {
w.count = 0
}
}
// close segment
func (w *writer) close() {
if w.file == nil {
return
}
w.sync()
if err := w.file.Close(); err != nil {
return
}
w.size, w.file, w.writer = 0, nil, nil
}
// segment num
func (w *writer) segmentNum() int64 {
segments, _ := filepath.Glob(path.Join(w.config.Path, "*.data"))
return int64(len(segments))
}