zstd.go

// Package zstd implements Zstandard compression.
package zstd

import (
	"io"
	"sync"

	zstdlib "github.com/klauspost/compress/zstd"
	kafka "github.com/segmentio/kafka-go"
)
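
// init registers the codec with kafka-go so that messages carrying the zstd
// compression code are produced and decompressed transparently.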
func init() {
	kafka.RegisterCompressionCodec(NewCompressionCodec())
}
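
// Code is the compression codec code assigned to zstd by the Kafka protocol.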
const Code = 4
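
// DefaultCompressionLevel is the zstd compression level used when no explicit
// level is requested.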
const DefaultCompressionLevel = 3
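
// CompressionCodec implements the kafka.CompressionCodec interface using
// Zstandard compression at the configured encoder level.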
type CompressionCodec struct{ level zstdlib.EncoderLevel }
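
// NewCompressionCodec creates a codec that compresses with DefaultCompressionLevel.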
func NewCompressionCodec() *CompressionCodec {
	return NewCompressionCodecWith(DefaultCompressionLevel)
}
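
// NewCompressionCodecWith creates a codec that compresses with the given zstd
// compression level.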
func NewCompressionCodecWith(level int) *CompressionCodec {
	return &CompressionCodec{zstdlib.EncoderLevelFromZstd(level)}
}

// Code implements the kafka.CompressionCodec interface.
func (c *CompressionCodec) Code() int8 { return Code }

// Name implements the kafka.CompressionCodec interface.
func (c *CompressionCodec) Name() string { return "zstd" }

// NewReader implements the kafka.CompressionCodec interface.
func (c *CompressionCodec) NewReader(r io.Reader) io.ReadCloser {
	p := new(reader)
	if cached := decPool.Get(); cached == nil {
		p.dec, p.err = zstdlib.NewReader(r)
	} else {
		p.dec = cached.(*zstdlib.Decoder)
		p.err = p.dec.Reset(r)
	}
	return p
}
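
// decPool caches *zstdlib.Decoder values so NewReader can reset a pooled
// decoder onto the new source instead of allocating a fresh one each time.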
var decPool sync.Pool
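
// reader adapts a pooled zstd decoder to io.ReadCloser; err is sticky and is
// set to io.ErrClosedPipe once the reader is closed.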
type reader struct {
	dec *zstdlib.Decoder
	err error
}

// Close implements the io.Closer interface.
func (r *reader) Close() error {
	if r.dec != nil {
		decPool.Put(r.dec)
		r.dec = nil
		r.err = io.ErrClosedPipe
	}
	return nil
}

// Read implements the io.Reader interface.
func (r *reader) Read(p []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	return r.dec.Read(p)
}

// WriteTo implements the io.WriterTo interface.
func (r *reader) WriteTo(w io.Writer) (n int64, err error) {
	if r.err != nil {
		return 0, r.err
	}
	return r.dec.WriteTo(w)
}

// NewWriter implements the kafka.CompressionCodec interface.
func (c *CompressionCodec) NewWriter(w io.Writer) io.WriteCloser {
	p := new(writer)
	if cached := encPool.Get(); cached == nil {
		p.enc, p.err = zstdlib.NewWriter(w,
			zstdlib.WithEncoderLevel(c.level))
	} else {
		p.enc = cached.(*zstdlib.Encoder)
		p.enc.Reset(w)
	}
	return p
}
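
// encPool caches *zstdlib.Encoder values so NewWriter can reset a pooled
// encoder onto the new destination instead of allocating a fresh one each time.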
var encPool sync.Pool
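
// writer adapts a pooled zstd encoder to io.WriteCloser; err is sticky and is
// set to io.ErrClosedPipe once the writer is closed.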
type writer struct {
	enc *zstdlib.Encoder
	err error
}

// Close implements the io.Closer interface.
func (w *writer) Close() error {
	if w.enc == nil {
		return nil // already closed
	}
	err := w.enc.Close()
	encPool.Put(w.enc)
	w.enc = nil
	w.err = io.ErrClosedPipe
	return err
}

// Write implements the io.Writer interface.
func (w *writer) Write(p []byte) (n int, err error) {
	if w.err != nil {
		return 0, w.err
	}
	return w.enc.Write(p)
}

// ReadFrom implements the io.ReaderFrom interface.
func (w *writer) ReadFrom(r io.Reader) (n int64, err error) {
	if w.err != nil {
		return 0, w.err
	}
	return w.enc.ReadFrom(r)
}
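
Usage note: because init registers the codec globally, a consumer only needs to
import this package (a blank import is enough) to read zstd-compressed topics.
The producer-side sketch below is minimal and not part of this file; it assumes
the upstream import path github.com/segmentio/kafka-go/zstd for this package
and the kafka-go API generation that pairs RegisterCompressionCodec with the
WriterConfig.CompressionCodec field (later kafka-go releases configure
compression differently).

package main

import (
	"context"
	"log"

	kafka "github.com/segmentio/kafka-go"
	zstd "github.com/segmentio/kafka-go/zstd" // assumed import path for the package above
)

func main() {
	// Configure a writer that zstd-compresses outgoing message sets with the
	// codec defined in zstd.go, using the default compression level.
	w := kafka.NewWriter(kafka.WriterConfig{
		Brokers:          []string{"localhost:9092"},
		Topic:            "example-topic",
		CompressionCodec: zstd.NewCompressionCodec(),
	})
	defer w.Close()

	if err := w.WriteMessages(context.Background(),
		kafka.Message{Key: []byte("key"), Value: []byte("zstd-compressed payload")},
	); err != nil {
		log.Fatal(err)
	}
}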