package kafka

import (
    "bufio"
    "bytes"
    "compress/gzip"
    "fmt"
    "io/ioutil"
    "time"
)

const compressionCodecMask int8 = 0x03
const defaultCompressionLevel int = -1

// CompressionCodec represents the compression codecs available in Kafka.
// See: https://cwiki.apache.org/confluence/display/KAFKA/Compression
type CompressionCodec int8

const (
    CompressionNone CompressionCodec = iota
    CompressionGZIP
    CompressionSnappy
    CompressionLZ4
)

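// packCodec is an illustrative sketch and not part of the original file: it
// shows how a codec value fits into the low bits of a message's attributes
// byte, mirroring what message() and decode() below do with
// compressionCodecMask.
func packCodec(codec CompressionCodec) int8 {
    return int8(codec) & compressionCodecMask
}
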
// Message is a data structure representing kafka messages.
type Message struct {
    // Topic is read-only and MUST NOT be set when writing messages.
    Topic string

    // Partition is read-only and MUST NOT be set when writing messages.
    Partition int

    Offset int64
    Key    []byte
    Value  []byte

    // If not set at the creation, Time will be automatically set when
    // writing the message.
    Time time.Time

    CompressionCodec int8
    CompressionLevel int
}

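// newExampleMessage is an illustrative sketch and not part of the original
// file: it shows how a producer-side caller might fill in a Message before
// writing it. Topic and Partition are left unset because they are read-only,
// and the key/value contents are made-up placeholders.
func newExampleMessage() Message {
    return Message{
        Key:              []byte("user-42"),
        Value:            []byte(`{"action":"login"}`),
        Time:             time.Now(),
        CompressionCodec: int8(CompressionGZIP),
    }
}
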
// item wraps the message into a messageSetItem and records its encoded size.
func (msg Message) item() messageSetItem {
    item := messageSetItem{
        Offset:  msg.Offset,
        Message: msg.message(),
    }
    item.MessageSize = item.Message.size()
    return item
}

// message converts the Message to its wire representation (magic byte 1, so
// the timestamp is included) and computes its CRC.
func (msg Message) message() message {
    m := message{
        MagicByte:  1,
        Key:        msg.Key,
        Value:      msg.Value,
        Timestamp:  timestamp(msg.Time),
        Attributes: msg.CompressionCodec & compressionCodecMask,
    }
    m.CRC = m.crc32()
    return m
}

// encode compresses the message value with the given codec and level.
// CompressionNone returns the message unchanged; only gzip is implemented
// here, and any other codec yields an error.
func (msg Message) encode(codec CompressionCodec, level int) (Message, error) {
    var err error

    switch codec {
    case CompressionNone:
        return msg, nil

    case CompressionGZIP:
        var buf bytes.Buffer
        var writer *gzip.Writer

        if level != defaultCompressionLevel {
            writer, err = gzip.NewWriterLevel(&buf, level)
            if err != nil {
                return msg, err
            }
        } else {
            writer = gzip.NewWriter(&buf)
        }

        if _, err := writer.Write(msg.Value); err != nil {
            return msg, err
        }
        if err := writer.Close(); err != nil {
            return msg, err
        }
        msg.Value = buf.Bytes()
        return msg, nil

    default:
        return msg, fmt.Errorf("compression codec not supported: %d", codec)
    }
}

// decode decompresses the message value according to the compression codec
// recorded in the message attributes.
func (msg Message) decode() (Message, error) {
    codec := msg.message().Attributes & compressionCodecMask

    switch CompressionCodec(codec) {
    case CompressionNone:
        return msg, nil

    case CompressionGZIP:
        reader, err := gzip.NewReader(bytes.NewReader(msg.Value))
        if err != nil {
            return msg, err
        }
        msg.Value, err = ioutil.ReadAll(reader)
        return msg, err

    default:
        return msg, fmt.Errorf("compression codec not supported: %d", codec)
    }
}

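// exampleGzipRoundTrip is an illustrative sketch and not part of the original
// file: it compresses a message value with encode, marks the message as
// gzip-compressed so decode knows how to read it back, and then decompresses
// it, returning the restored message.
func exampleGzipRoundTrip(msg Message) (Message, error) {
    encoded, err := msg.encode(CompressionGZIP, defaultCompressionLevel)
    if err != nil {
        return msg, err
    }
    encoded.CompressionCodec = int8(CompressionGZIP)
    return encoded.decode()
}
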
// message is the wire-level form of a single Kafka message; the timestamp is
// only present when MagicByte is non-zero.
type message struct {
    CRC        int32
    MagicByte  int8
    Attributes int8
    Timestamp  int64
    Key        []byte
    Value      []byte
}

func (m message) crc32() int32 {
    return int32(crc32OfMessage(m.MagicByte, m.Attributes, m.Timestamp, m.Key, m.Value))
}

func (m message) size() int32 {
    size := 4 + 1 + 1 + sizeofBytes(m.Key) + sizeofBytes(m.Value)
    if m.MagicByte != 0 {
        size += 8 // Timestamp
    }
    return size
}

func (m message) writeTo(w *bufio.Writer) {
    writeInt32(w, m.CRC)
    writeInt8(w, m.MagicByte)
    writeInt8(w, m.Attributes)
    if m.MagicByte != 0 {
        writeInt64(w, m.Timestamp)
    }
    writeBytes(w, m.Key)
    writeBytes(w, m.Value)
}

// messageSetItem pairs a message with its offset and encoded size, matching
// the layout of an entry in a Kafka message set.
type messageSetItem struct {
    Offset      int64
    MessageSize int32
    Message     message
}

func (m messageSetItem) size() int32 {
    return 8 + 4 + m.Message.size()
}

func (m messageSetItem) writeTo(w *bufio.Writer) {
    writeInt64(w, m.Offset)
    writeInt32(w, m.MessageSize)
    m.Message.writeTo(w)
}

// messageSet is a sequence of messages written back to back on the wire.
type messageSet []messageSetItem

func (s messageSet) size() (size int32) {
    for _, m := range s {
        size += m.size()
    }
    return
}

func (s messageSet) writeTo(w *bufio.Writer) {
    for _, m := range s {
        m.writeTo(w)
    }
}

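// exampleMarshalMessageSet is an illustrative sketch and not part of the
// original file: it serializes a messageSet into a byte slice using the
// writeTo methods above, flushing the bufio.Writer so the encoded bytes end
// up in the backing buffer.
func exampleMarshalMessageSet(set messageSet) ([]byte, error) {
    buf := &bytes.Buffer{}
    w := bufio.NewWriter(buf)
    set.writeTo(w)
    if err := w.Flush(); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}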