@@ -7,7 +7,7 @@
 from collections import OrderedDict

 from kafka.client_async import KafkaClient
-from kafka.consumer.fetcher import Fetcher, NoOffsetForPartitionError
+from kafka.consumer.fetcher import ConsumerRecord, Fetcher, NoOffsetForPartitionError
 from kafka.consumer.subscription_state import SubscriptionState
 from kafka.metrics import Metrics
 from kafka.protocol.fetch import FetchRequest
@@ -282,3 +282,26 @@ def test__handle_offset_response(fetcher, mocker):
     fetcher._handle_offset_response(fut, res)
     assert fut.failed()
     assert isinstance(fut.exception, NotLeaderForPartitionError)
+
+
+def test_partition_records_offset():
+    """Test that compressed messagesets are handled correctly
+    when the fetch offset is in the middle of the message list.
+    """
+    batch_start = 120
+    batch_end = 130
+    fetch_offset = 123
+    tp = TopicPartition('foo', 0)
+    messages = [ConsumerRecord(tp.topic, tp.partition, i,
+                               None, None, 'key', 'value', 'checksum', 0, 0)
+                for i in range(batch_start, batch_end)]
+    records = Fetcher.PartitionRecords(fetch_offset, None, messages)
+    assert records.has_more()
+    msgs = records.take(1)
+    assert msgs[0].offset == 123
+    assert records.fetch_offset == 124
+    msgs = records.take(2)
+    assert len(msgs) == 2
+    assert records.has_more()
+    records.discard()
+    assert not records.has_more()
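
Why this test matters: Kafka compresses messagesets as a unit, so a fetch at offset 123 can return a decompressed batch that starts earlier (here, at 120). The buffered records object must skip the leading messages and keep its fetch offset in sync as records are handed out. The sketch below is a minimal, hypothetical stand-in illustrating the semantics the test asserts (has_more, take, discard, fetch_offset); it is not kafka-python's actual Fetcher.PartitionRecords implementation, and the Message type is invented for the example.

from collections import namedtuple

# Hypothetical message type for this sketch; only .offset matters here.
Message = namedtuple('Message', ['offset', 'value'])


class PartitionRecordsSketch(object):
    """Illustrative stand-in for Fetcher.PartitionRecords (assumed API)."""

    def __init__(self, fetch_offset, tp, messages):
        self.fetch_offset = fetch_offset
        self.topic_partition = tp
        # A compressed messageset decompresses as a whole, so it can
        # contain messages that precede the requested offset; drop them.
        self.messages = [m for m in messages if m.offset >= fetch_offset]

    def has_more(self):
        return bool(self.messages)

    def take(self, n):
        # Hand out up to n messages and advance fetch_offset past the
        # last one returned, so a subsequent fetch resumes correctly.
        taken, self.messages = self.messages[:n], self.messages[n:]
        if taken:
            self.fetch_offset = taken[-1].offset + 1
        return taken

    def discard(self):
        # Drop any remaining buffered messages.
        self.messages = []


# Mirrors the test above: batch [120, 130), fetch offset 123.
records = PartitionRecordsSketch(123, None,
                                 [Message(i, b'v') for i in range(120, 130)])
assert records.take(1)[0].offset == 123
assert records.fetch_offset == 124

Advancing fetch_offset as records are consumed, rather than when the batch arrives, is what lets a consumer resume mid-batch without re-delivering the messages it already skipped.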