package kafka
import (
"context"
"fmt"
"io"
"log"
"math"
"strconv"
"sync"
"sync/atomic"
"time"
)
const (
firstOffset = -1 // sentinel: seek to the first available offset
lastOffset = -2 // sentinel: seek to the last available offset
)
// Reader provides a high-level API for consuming messages from kafka.
//
// A Reader automatically manages reconnections to a kafka server, and
// blocking methods have context support for asynchronous cancellations.
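//
// A minimal usage sketch (the broker address and topic are placeholder
// values, not anything defined by this package):
//
//	r := NewReader(ReaderConfig{
//		Brokers:   []string{"localhost:9092"},
//		Topic:     "example-topic",
//		Partition: 0,
//	})
//	defer r.Close()
//
//	for {
//		m, err := r.ReadMessage(context.Background())
//		if err != nil {
//			break // io.EOF once the reader has been closed
//		}
//		fmt.Printf("offset %d: %s\n", m.Offset, string(m.Value))
//	}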
type Reader struct {
// immutable fields of the reader
config ReaderConfig
// communication channel between the parent reader and its subreaders
msgs chan readerMessage
// mutable fields of the reader (synchronized on the mutex)
mutex sync.Mutex
join sync.WaitGroup
cancel context.CancelFunc
stop context.CancelFunc
version int64
offset int64
lag int64
closed bool
// reader stats are all made of atomic values, no need for synchronization.
once uint32
stctx context.Context
stats readerStats
}
// ReaderConfig is a configuration object used to create new instances of
// Reader.
type ReaderConfig struct {
// The list of broker addresses used to connect to the kafka cluster.
Brokers []string
// The topic to read messages from.
Topic string
// The partition number to read messages from.
Partition int
// A dialer used to open connections to the kafka server. This field is
// optional; if nil, the default dialer is used instead.
Dialer *Dialer
// The capacity of the internal message queue, defaults to 100 if none is
// set.
QueueCapacity int
// Min and max number of bytes to fetch from kafka in each request.
MinBytes int
MaxBytes int
// Maximum amount of time to wait for new data to arrive when fetching batches
// of messages from kafka.
MaxWait time.Duration
// ReadLagInterval sets the frequency at which the reader lag is updated.
// Setting this field to a negative value disables lag reporting.
ReadLagInterval time.Duration
// If not nil, specifies a logger used to report internal changes within the
// reader.
Logger *log.Logger
// ErrorLogger is the logger used to report errors. If nil, the reader falls
// back to using Logger instead.
ErrorLogger *log.Logger
}
// ReaderStats is a data structure returned by a call to Reader.Stats that exposes
// details about the behavior of the reader.
type ReaderStats struct {
Dials int64 `metric:"kafka.reader.dial.count" type:"counter"`
Fetches int64 `metric:"kafka.reader.fetch.count" type:"counter"`
Messages int64 `metric:"kafka.reader.message.count" type:"counter"`
Bytes int64 `metric:"kafka.reader.message.bytes" type:"counter"`
Rebalances int64 `metric:"kafka.reader.rebalance.count" type:"counter"`
Timeouts int64 `metric:"kafka.reader.timeout.count" type:"counter"`
Errors int64 `metric:"kafka.reader.error.count" type:"counter"`
DialTime DurationStats `metric:"kafka.reader.dial.seconds"`
ReadTime DurationStats `metric:"kafka.reader.read.seconds"`
WaitTime DurationStats `metric:"kafka.reader.wait.seconds"`
FetchSize SummaryStats `metric:"kafka.reader.fetch.size"`
FetchBytes SummaryStats `metric:"kafka.reader.fetch.bytes"`
Offset int64 `metric:"kafka.reader.offset" type:"gauge"`
Lag int64 `metric:"kafka.reader.lag" type:"gauge"`
MinBytes int64 `metric:"kafka.reader.fetch_bytes.min" type:"gauge"`
MaxBytes int64 `metric:"kafka.reader.fetch_bytes.max" type:"gauge"`
MaxWait time.Duration `metric:"kafka.reader.fetch_wait.max" type:"gauge"`
QueueLength int64 `metric:"kafka.reader.queue.length" type:"gauge"`
QueueCapacity int64 `metric:"kafka.reader.queue.capacity" type:"gauge"`
ClientID string `tag:"client_id"`
Topic string `tag:"topic"`
Partition string `tag:"partition"`
}
type readerStats struct {
dials counter
fetches counter
messages counter
bytes counter
rebalances counter
timeouts counter
errors counter
dialTime summary
readTime summary
waitTime summary
fetchSize summary
fetchBytes summary
offset gauge
lag gauge
partition string
}
// NewReader creates and returns a new Reader configured with config.
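//
// Fields of config left at their zero value are given defaults by NewReader,
// as implemented below: for example MaxBytes defaults to 1e6 (1 MB), MaxWait
// to 10 seconds, ReadLagInterval to 1 minute, and QueueCapacity to 100.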
func NewReader(config ReaderConfig) *Reader {
if len(config.Brokers) == 0 {
panic("cannot create a new kafka reader with an empty list of broker addresses")
}
if len(config.Topic) == 0 {
panic("cannot create a new kafka reader with an empty topic")
}
if config.Partition < 0 || config.Partition >= math.MaxInt32 {
panic(fmt.Sprintf("partition number out of bounds: %d", config.Partition))
}
if config.Dialer == nil {
config.Dialer = DefaultDialer
}
if config.MinBytes > config.MaxBytes {
panic(fmt.Sprintf("minimum batch size greater than the maximum (min = %d, max = %d)", config.MinBytes, config.MaxBytes))
}
if config.MinBytes < 0 {
panic(fmt.Sprintf("invalid negative minimum batch size (min = %d)", config.MinBytes))
}
if config.MaxBytes < 0 {
panic(fmt.Sprintf("invalid negative maximum batch size (max = %d)", config.MaxBytes))
}
if config.MaxBytes == 0 {
config.MaxBytes = 1e6 // 1 MB
}
if config.MinBytes == 0 {
config.MinBytes = config.MaxBytes
}
if config.MaxWait == 0 {
config.MaxWait = 10 * time.Second
}
if config.ReadLagInterval == 0 {
config.ReadLagInterval = 1 * time.Minute
}
if config.QueueCapacity == 0 {
config.QueueCapacity = 100
}
stctx, stop := context.WithCancel(context.Background())
return &Reader{
config: config,
msgs: make(chan readerMessage, config.QueueCapacity),
cancel: func() {},
stop: stop,
offset: firstOffset,
stctx: stctx,
stats: readerStats{
dialTime: makeSummary(),
readTime: makeSummary(),
waitTime: makeSummary(),
fetchSize: makeSummary(),
fetchBytes: makeSummary(),
// Generate the string representation of the partition number only
// once when the reader is created.
partition: strconv.Itoa(config.Partition),
},
}
}
// Config returns the reader's configuration.
func (r *Reader) Config() ReaderConfig {
return r.config
}
// Close closes the stream, preventing the program from reading any more
// messages from it.
func (r *Reader) Close() error {
atomic.StoreUint32(&r.once, 1)
r.mutex.Lock()
closed := r.closed
r.closed = true
r.mutex.Unlock()
r.cancel()
r.stop()
r.join.Wait()
if !closed {
close(r.msgs)
}
return nil
}
// ReadMessage reads and returns the next message from r. The method call
// blocks until a message becomes available, or an error occurs. The program
// may also specify a context to asynchronously cancel the blocking operation.
//
// The method returns io.EOF to indicate that the reader has been closed.
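//
// A sketch of a bounded read, assuming the caller prefers to give up after a
// fixed delay rather than block indefinitely:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//
//	m, err := r.ReadMessage(ctx)
//	switch err {
//	case nil:
//		// process m
//	case io.EOF:
//		// the reader has been closed
//	default:
//		// context cancellation or a fetch error
//	}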
func (r *Reader) ReadMessage(ctx context.Context) (Message, error) {
if r.config.ReadLagInterval > 0 && atomic.CompareAndSwapUint32(&r.once, 0, 1) {
go r.readLag(r.stctx)
}
for {
r.mutex.Lock()
if !r.closed && r.version == 0 {
r.start()
}
version := r.version
r.mutex.Unlock()
select {
case <-ctx.Done():
return Message{}, ctx.Err()
case m, ok := <-r.msgs:
if !ok {
return Message{}, io.EOF
}
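// Messages carrying an older version come from a subreader that has
// since been replaced (for example after a SetOffset call); drop them
// and wait for a message from the current subreader.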
if m.version >= version {
r.mutex.Lock()
switch {
case m.error != nil:
case version == r.version:
r.offset = m.message.Offset + 1
r.lag = m.watermark - r.offset
}
r.mutex.Unlock()
switch m.error {
case nil:
case io.EOF:
// io.EOF is reserved as a marker to indicate that the
// reader has been closed; if it was received from the
// inner reader, replace it with io.ErrUnexpectedEOF so
// the program doesn't mistake it for a normal close.
m.error = io.ErrUnexpectedEOF
}
return m.message, m.error
}
}
}
}
// ReadLag returns the current lag of the reader by fetching the last offset of
// the topic and partition and computing the difference between that value and
// the offset of the last message returned by ReadMessage.
//
// This method is intended to be used in cases where a program may be unable to
// call ReadMessage to update the value returned by Lag, but still needs to get
// an up-to-date estimate of how far behind the reader is; for example, when
// the consumer is not ready to process the next message.
//
// When the reader's current offset is one of the negative sentinels, the lag
// is computed from the partition offsets alone: firstOffset yields the full
// span between the first and last offsets, and lastOffset yields zero.
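//
// A sketch of an on-demand lag check, assuming the caller bounds the lookup
// with a timeout:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//
//	lag, err := r.ReadLag(ctx)
//	if err == nil && lag > 10000 {
//		// the reader is falling behind; shed load or scale out
//	}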
func (r *Reader) ReadLag(ctx context.Context) (lag int64, err error) {
type offsets struct {
first int64
last int64
}
offch := make(chan offsets, 1)
errch := make(chan error, 1)
go func() {
var off offsets
var err error
for _, broker := range r.config.Brokers {
var conn *Conn
if conn, err = r.config.Dialer.DialLeader(ctx, "tcp", broker, r.config.Topic, r.config.Partition); err != nil {
continue
}
deadline, _ := ctx.Deadline()
conn.SetDeadline(deadline)
off.first, off.last, err = conn.ReadOffsets()
conn.Close()
if err == nil {
break
}
}
if err != nil {
errch <- err
} else {
offch <- off
}
}()
select {
case off := <-offch:
switch cur := r.Offset(); {
case cur == firstOffset:
lag = off.last - off.first
case cur == lastOffset:
lag = 0
default:
lag = off.last - cur
}
case err = <-errch:
case <-ctx.Done():
err = ctx.Err()
}
return
}
// Offset returns the current offset of the reader.
func (r *Reader) Offset() int64 {
r.mutex.Lock()
offset := r.offset
r.mutex.Unlock()
r.withLogger(func(log *log.Logger) {
log.Printf("looking up offset of kafka reader for partition %d of %s: %d", r.config.Partition, r.config.Topic, offset)
})
return offset
}
// Lag returns the lag of the last message returned by ReadMessage.
func (r *Reader) Lag() int64 {
r.mutex.Lock()
lag := r.lag
r.mutex.Unlock()
return lag
}
// SetOffset changes the offset from which the next batch of messages will be
// read.
//
// Setting the offset to -1 means to seek to the first offset.
// Setting the offset to -2 means to seek to the last offset.
//
// The method fails with io.ErrClosedPipe if the reader has already been closed.
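//
// A sketch of skipping directly to the newest messages before reading, using
// the -2 sentinel described above:
//
//	if err := r.SetOffset(-2); err != nil {
//		// the reader has already been closed
//	}
//	m, err := r.ReadMessage(ctx)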
func (r *Reader) SetOffset(offset int64) error {
var err error
r.mutex.Lock()
if r.closed {
err = io.ErrClosedPipe
} else if offset != r.offset {
r.withLogger(func(log *log.Logger) {
log.Printf("setting the offset of the kafka reader for partition %d of %s from %d to %d",
r.config.Partition, r.config.Topic, r.offset, offset)
})
r.offset = offset
if r.version != 0 {
r.start()
}
}
r.mutex.Unlock()
return err
}
// Stats returns a snapshot of the reader stats since the last time the method
// was called, or since the reader was created if it is called for the first
// time.
//
// A typical use of this method is to spawn a goroutine that will periodically
// call Stats on a kafka reader and report the metrics to a stats collection
// system.
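//
// A sketch of such a goroutine (report is a placeholder for whatever the
// program uses to publish metrics):
//
//	go func() {
//		ticker := time.NewTicker(1 * time.Minute)
//		defer ticker.Stop()
//		for range ticker.C {
//			report(r.Stats())
//		}
//	}()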
func (r *Reader) Stats() ReaderStats {
return ReaderStats{
Dials: r.stats.dials.snapshot(),
Fetches: r.stats.fetches.snapshot(),
Messages: r.stats.messages.snapshot(),
Bytes: r.stats.bytes.snapshot(),
Rebalances: r.stats.rebalances.snapshot(),
Timeouts: r.stats.timeouts.snapshot(),
Errors: r.stats.errors.snapshot(),
DialTime: r.stats.dialTime.snapshotDuration(),
ReadTime: r.stats.readTime.snapshotDuration(),
WaitTime: r.stats.waitTime.snapshotDuration(),
FetchSize: r.stats.fetchSize.snapshot(),
FetchBytes: r.stats.fetchBytes.snapshot(),
Offset: r.stats.offset.snapshot(),
Lag: r.stats.lag.snapshot(),
MinBytes: int64(r.config.MinBytes),
MaxBytes: int64(r.config.MaxBytes),
MaxWait: r.config.MaxWait,
QueueLength: int64(len(r.msgs)),
QueueCapacity: int64(cap(r.msgs)),
ClientID: r.config.Dialer.ClientID,
Topic: r.config.Topic,
Partition: r.stats.partition,
}
}
func (r *Reader) withLogger(do func(*log.Logger)) {
if r.config.Logger != nil {
do(r.config.Logger)
}
}
func (r *Reader) withErrorLogger(do func(*log.Logger)) {
if r.config.ErrorLogger != nil {
do(r.config.ErrorLogger)
} else {
r.withLogger(do)
}
}
func (r *Reader) readLag(ctx context.Context) {
ticker := time.NewTicker(r.config.ReadLagInterval)
defer ticker.Stop()
for {
timeout, cancel := context.WithTimeout(ctx, r.config.ReadLagInterval/2)
lag, err := r.ReadLag(timeout)
cancel()
if err != nil {
r.stats.errors.observe(1)
r.withErrorLogger(func(log *log.Logger) {
log.Printf("kafka reader failed to read lag of partition %d of %s", r.config.Partition, r.config.Topic)
})
} else {
r.stats.lag.observe(lag)
}
select {
case <-ticker.C:
case <-ctx.Done():
return
}
}
}
func (r *Reader) start() {
ctx, cancel := context.WithCancel(context.Background())
r.cancel() // always cancel the previous reader
r.cancel = cancel
r.version++
r.join.Add(1)
go (&reader{
dialer: r.config.Dialer,
logger: r.config.Logger,
errorLogger: r.config.ErrorLogger,
brokers: r.config.Brokers,
topic: r.config.Topic,
partition: r.config.Partition,
minBytes: r.config.MinBytes,
maxBytes: r.config.MaxBytes,
maxWait: r.config.MaxWait,
version: r.version,
msgs: r.msgs,
stats: &r.stats,
}).run(ctx, r.offset, &r.join)
}
// A reader reads messages from kafka and produces them on its channel. It is
// used as a way to asynchronously fetch messages while the main program reads
// them through the high-level Reader API.
type reader struct {
dialer *Dialer
logger *log.Logger
errorLogger *log.Logger
brokers []string
topic string
partition int
minBytes int
maxBytes int
maxWait time.Duration
version int64
msgs chan<- readerMessage
stats *readerStats
}
type readerMessage struct {
version int64
message Message
watermark int64
error error
}
func (r *reader) run(ctx context.Context, offset int64, join *sync.WaitGroup) {
defer join.Done()
const backoffDelayMin = 100 * time.Millisecond
const backoffDelayMax = 1 * time.Second
// This is the reader's main loop: it only ends when the context is canceled
// and keeps attempting to read messages otherwise.
//
// Retrying indefinitely has the nice side effect of preventing Read calls
// on the parent reader from blocking if the connection to the kafka server
// fails: the reader keeps reporting errors through the message channel, and
// those errors are then surfaced to the program.
// If the reader didn't retry, the program would block indefinitely on a
// Read call after receiving the first error.
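//
// The exact delays come from the backoff helper; assuming it grows the
// delay with the attempt number, retries are spaced between
// backoffDelayMin (100ms) and backoffDelayMax (1s).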
for attempt := 0; true; attempt++ {
if attempt != 0 {
if !sleep(ctx, backoff(attempt, backoffDelayMin, backoffDelayMax)) {
return
}
}
r.withLogger(func(log *log.Logger) {
log.Printf("initializing kafka reader for partition %d of %s starting at offset %d", r.partition, r.topic, offset)
})
conn, start, err := r.initialize(ctx, offset)
switch err {
case nil:
case OffsetOutOfRange:
// This would happen if the requested offset is past the last
// offset on the partition leader. In that case we're just going
// to retry later, hoping that enough data has been produced.
r.withErrorLogger(func(log *log.Logger) {
log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, OffsetOutOfRange)
})
continue
default:
// Wait 4 attempts before reporting the first errors; this helps
// mitigate situations where the kafka server is temporarily
// unavailable.
if attempt >= 3 {
r.sendError(ctx, err)
} else {
r.stats.errors.observe(1)
r.withErrorLogger(func(log *log.Logger) {
log.Printf("error initializing the kafka reader for partition %d of %s: %s", r.partition, r.topic, err)
})
}
continue
}
// Resetting the attempt counter ensures that if a failure occurs after
// a successful initialization we don't keep increasing the backoff
// timeout.
attempt = 0
// Now we're sure to have an absolute offset number; should anything happen
// to the connection, we know we'll want to restart from this offset.
offset = start
errcount := 0
readLoop:
for {
if !sleep(ctx, backoff(errcount, backoffDelayMin, backoffDelayMax)) {
conn.Close()
return
}
switch offset, err = r.read(ctx, offset, conn); err {
case nil:
errcount = 0
case NotLeaderForPartition:
r.withErrorLogger(func(log *log.Logger) {
log.Printf("failed to read from current broker for partition %d of %s at offset %d, not the leader", r.partition, r.topic, offset)
})
conn.Close()
// The next call to .initialize will re-establish a connection to the proper
// partition leader.
r.stats.rebalances.observe(1)
break readLoop
case RequestTimedOut:
// Timeout on the kafka side; this can be safely retried.
errcount = 0
r.withErrorLogger(func(log *log.Logger) {
log.Printf("no messages received from kafka within the allocated time for partition %d of %s at offset %d", r.partition, r.topic, offset)
})
r.stats.timeouts.observe(1)
continue
case OffsetOutOfRange:
first, last, err := r.readOffsets(conn)
if err != nil {
r.withErrorLogger(func(log *log.Logger) {
log.Printf("the kafka reader got an error while attempting to determine whether it was reading before the first offset or after the last offset of partition %d of %s: %s", r.partition, r.topic, err)
})
conn.Close()
break readLoop
}
switch {
case offset < first:
r.withErrorLogger(func(log *log.Logger) {
log.Printf("the kafka reader is reading before the first offset for partition %d of %s, skipping from offset %d to %d (%d messages)", r.partition, r.topic, offset, first, first-offset)
})
offset, errcount = first, 0
continue // retry immediately so we don't keep falling behind due to the backoff
case offset < last:
errcount = 0
continue // more messages have already become available, retry immediately
default:
// We may be reading past the last offset; we'll retry later.
r.withErrorLogger(func(log *log.Logger) {
log.Printf("the kafka reader is reading passed the last offset for partition %d of %s at offset %d", r.partition, r.topic, offset)
})
}
case context.Canceled:
// Another reader has taken over; we can safely quit.
conn.Close()
return
default:
if _, ok := err.(Error); ok {
r.sendError(ctx, err)
} else {
r.withErrorLogger(func(log *log.Logger) {
log.Printf("the kafka reader got an unknown error reading partition %d of %s at offset %d: %s", r.partition, r.topic, offset, err)
})
r.stats.errors.observe(1)
conn.Close()
break readLoop
}
}
errcount++
}
}
}
func (r *reader) initialize(ctx context.Context, offset int64) (conn *Conn, start int64, err error) {
for i := 0; i != len(r.brokers) && conn == nil; i++ {
var broker = r.brokers[i]
var first int64
var last int64
t0 := time.Now()
conn, err = r.dialer.DialLeader(ctx, "tcp", broker, r.topic, r.partition)
t1 := time.Now()
r.stats.dials.observe(1)
r.stats.dialTime.observeDuration(t1.Sub(t0))
if err != nil {
continue
}
if first, last, err = r.readOffsets(conn); err != nil {
conn.Close()
conn = nil
break
}
switch {
case offset == firstOffset:
offset = first
case offset == lastOffset:
offset = last
case offset < first:
offset = first
}
r.withLogger(func(log *log.Logger) {
log.Printf("the kafka reader for partition %d of %s is seeking to offset %d", r.partition, r.topic, offset)
})
if start, err = conn.Seek(offset, 1); err != nil {
conn.Close()
conn = nil
break
}
conn.SetDeadline(time.Time{})
}
return
}
func (r *reader) read(ctx context.Context, offset int64, conn *Conn) (int64, error) {
r.stats.fetches.observe(1)
r.stats.offset.observe(offset)
t0 := time.Now()
conn.SetReadDeadline(t0.Add(r.maxWait))
batch := conn.ReadBatch(r.minBytes, r.maxBytes)
highWaterMark := batch.HighWaterMark()
t1 := time.Now()
r.stats.waitTime.observeDuration(t1.Sub(t0))
var msg Message
var err error
var size int64
var bytes int64
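// A hard read deadline, refreshed below whenever more than half of it has
// been consumed, guarantees that a stalled connection eventually errors
// out instead of blocking this loop forever.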
const safetyTimeout = 10 * time.Second
deadline := time.Now().Add(safetyTimeout)
conn.SetReadDeadline(deadline)
for {
if now := time.Now(); deadline.Sub(now) < (safetyTimeout / 2) {
deadline = now.Add(safetyTimeout)
conn.SetReadDeadline(deadline)
}
if msg, err = batch.ReadMessage(); err != nil {
err = batch.Close()
break
}
n := int64(len(msg.Key) + len(msg.Value))
r.stats.messages.observe(1)
r.stats.bytes.observe(n)
if err = r.sendMessage(ctx, msg, highWaterMark); err != nil {
err = batch.Close()
break
}
offset = msg.Offset + 1
r.stats.offset.observe(offset)
r.stats.lag.observe(highWaterMark - offset)
size++
bytes += n
}
conn.SetReadDeadline(time.Time{})
t2 := time.Now()
r.stats.readTime.observeDuration(t2.Sub(t1))
r.stats.fetchSize.observe(size)
r.stats.fetchBytes.observe(bytes)
return offset, err
}
func (r *reader) readOffsets(conn *Conn) (first int64, last int64, err error) {
conn.SetDeadline(time.Now().Add(10 * time.Second))
return conn.ReadOffsets()
}
func (r *reader) sendMessage(ctx context.Context, msg Message, watermark int64) error {
select {
case r.msgs <- readerMessage{version: r.version, message: msg, watermark: watermark}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (r *reader) sendError(ctx context.Context, err error) error {
select {
case r.msgs <- readerMessage{version: r.version, error: err}:
return nil
case <-ctx.Done():
return ctx.Err()
}
}
func (r *reader) withLogger(do func(*log.Logger)) {
if r.logger != nil {
do(r.logger)
}
}
func (r *reader) withErrorLogger(do func(*log.Logger)) {
if r.errorLogger != nil {
do(r.errorLogger)
} else {
r.withLogger(do)
}
}