@@ -1,19 +1,18 @@
 import logging
 import os
 import time
-from mock import patch
-import pytest
-import kafka.codec
 
+from mock import patch
 import pytest
-from kafka.vendor.six.moves import range
 from kafka.vendor import six
+from kafka.vendor.six.moves import range
 
 from . import unittest
 from kafka import (
     KafkaConsumer, MultiProcessConsumer, SimpleConsumer, create_message,
     create_gzip_message, KafkaProducer
 )
+import kafka.codec
 from kafka.consumer.base import MAX_FETCH_BUFFER_SIZE_BYTES
 from kafka.errors import (
     ConsumerFetchSizeTooSmall, OffsetOutOfRangeError, UnsupportedVersionError,
@@ -23,11 +22,11 @@
     ProduceRequestPayload, TopicPartition, OffsetAndTimestamp
 )
 
-from test.fixtures import ZookeeperFixture, KafkaFixture, random_string, version
-from test.testutil import KafkaIntegrationTestCase, kafka_versions, Timer
+from test.fixtures import ZookeeperFixture, KafkaFixture
+from test.testutil import KafkaIntegrationTestCase, Timer, env_kafka_version, random_string
 
 
-@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
+@pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
 def test_kafka_consumer(kafka_producer, topic, kafka_consumer_factory):
     """Test KafkaConsumer"""
     kafka_consumer = kafka_consumer_factory(auto_offset_reset='earliest')
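Note: this diff replaces the old `version()` helper (previously exported from `test.fixtures`) with `env_kafka_version()` from `test.testutil`. Judging by how the markers below use it, the helper presumably parses the `KAFKA_VERSION` environment variable into a tuple of ints and returns an empty (falsy) tuple when the variable is unset. A minimal sketch under that assumption:

```python
import os

def env_kafka_version():
    """Hypothetical sketch: parse KAFKA_VERSION (e.g. '0.10.1.1') into (0, 10, 1, 1).

    Returning an empty tuple when KAFKA_VERSION is unset makes both
    `not env_kafka_version()` and tuple comparisons behave sensibly.
    """
    if 'KAFKA_VERSION' not in os.environ:
        return ()
    return tuple(map(int, os.environ['KAFKA_VERSION'].split('.')))
```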
@@ -54,7 +53,7 @@ def test_kafka_consumer(kafka_producer, topic, kafka_consumer_factory):
     kafka_consumer.close()
 
 
-@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
+@pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
 def test_kafka_consumer_unsupported_encoding(
         topic, kafka_producer_factory, kafka_consumer_factory):
     # Send a compressed message
@@ -211,7 +210,7 @@ def test_simple_consumer_no_reset(self):
         with self.assertRaises(OffsetOutOfRangeError):
             consumer.get_message()
 
-    @kafka_versions('>=0.8.1')
+    @pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
     def test_simple_consumer_load_initial_offsets(self):
         self.send_messages(0, range(0, 100))
         self.send_messages(1, range(100, 200))
@@ -388,7 +387,7 @@ def test_multi_proc_pending(self):
         consumer.stop()
 
     @unittest.skip('MultiProcessConsumer deprecated and these tests are flaky')
-    @kafka_versions('>=0.8.1')
+    @pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
     def test_multi_process_consumer_load_initial_offsets(self):
         self.send_messages(0, range(0, 10))
         self.send_messages(1, range(10, 20))
@@ -459,7 +458,7 @@ def test_huge_messages(self):
 
         big_consumer.stop()
 
-    @kafka_versions('>=0.8.1')
+    @pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
    def test_offset_behavior__resuming_behavior(self):
         self.send_messages(0, range(0, 100))
         self.send_messages(1, range(100, 200))
@@ -491,7 +490,7 @@ def test_offset_behavior__resuming_behavior(self):
         consumer2.stop()
 
     @unittest.skip('MultiProcessConsumer deprecated and these tests are flaky')
-    @kafka_versions('>=0.8.1')
+    @pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
     def test_multi_process_offset_behavior__resuming_behavior(self):
         self.send_messages(0, range(0, 100))
         self.send_messages(1, range(100, 200))
@@ -548,6 +547,7 @@ def test_fetch_buffer_size(self):
         messages = [message for message in consumer]
         self.assertEqual(len(messages), 2)
 
+    @pytest.mark.skipif(not env_kafka_version(), reason="No KAFKA_VERSION set")
     def test_kafka_consumer__blocking(self):
         TIMEOUT_MS = 500
         consumer = self.kafka_consumer(auto_offset_reset='earliest',
@@ -586,7 +586,7 @@ def test_kafka_consumer__blocking(self):
         self.assertGreaterEqual(t.interval, TIMEOUT_MS / 1000.0)
         consumer.close()
 
-    @kafka_versions('>= 0.8.1')
+    @pytest.mark.skipif(env_kafka_version() < (0, 8, 1), reason="Requires KAFKA_VERSION >= 0.8.1")
     def test_kafka_consumer__offset_commit_resume(self):
         GROUP_ID = random_string(10)
 
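Note: unlike the string-based `@kafka_versions('>= 0.8.1')` decorator, the new markers lean on Python's lexicographic tuple comparison. Assuming `env_kafka_version()` returns `()` when `KAFKA_VERSION` is unset, the empty tuple compares less than any real version, so these tests also skip when no broker is configured:

```python
# Tuple comparison is lexicographic, element by element:
assert (0, 10, 1) < (0, 10, 2)
assert (0, 8, 1, 1) >= (0, 8, 1)
# An empty tuple (KAFKA_VERSION unset) sorts below every non-empty version,
# so `env_kafka_version() < (0, 8, 1)` skips when no broker is configured:
assert () < (0, 8, 1)
```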
@@ -605,7 +605,7 @@ def test_kafka_consumer__offset_commit_resume(self):
         output_msgs1 = []
         for _ in range(180):
             m = next(consumer1)
-            output_msgs1.append(m)
+            output_msgs1.append((m.key, m.value))
         self.assert_message_count(output_msgs1, 180)
         consumer1.close()
 
@@ -621,12 +621,12 @@ def test_kafka_consumer__offset_commit_resume(self):
         output_msgs2 = []
         for _ in range(20):
             m = next(consumer2)
-            output_msgs2.append(m)
+            output_msgs2.append((m.key, m.value))
         self.assert_message_count(output_msgs2, 20)
         self.assertEqual(len(set(output_msgs1) | set(output_msgs2)), 200)
         consumer2.close()
 
-    @kafka_versions('>= 0.10.1')
+    @pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason="Requires KAFKA_VERSION >= 0.10.1")
     def test_kafka_consumer_max_bytes_simple(self):
         self.send_messages(0, range(100, 200))
         self.send_messages(1, range(200, 300))
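Note: the two `append` changes above collect `(m.key, m.value)` tuples instead of whole `ConsumerRecord` objects, presumably so the `set(...) | set(...)` assertion counts distinct message payloads rather than full records, whose metadata fields (offset, timestamp, etc.) are irrelevant to the resume check. Roughly:

```python
# Illustrative only: project records down to the fields the assertion cares about.
records1 = [(b'k1', b'v1'), (b'k2', b'v2')]  # from consumer1
records2 = [(b'k2', b'v2'), (b'k3', b'v3')]  # from consumer2 after resuming
combined = set(records1) | set(records2)
assert len(combined) == 3  # the overlap collapses; only unique (key, value) pairs count
```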
@@ -647,7 +647,7 @@ def test_kafka_consumer_max_bytes_simple(self):
             TopicPartition(self.topic, 0), TopicPartition(self.topic, 1)]))
         consumer.close()
 
-    @kafka_versions('>= 0.10.1')
+    @pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason="Requires KAFKA_VERSION >= 0.10.1")
     def test_kafka_consumer_max_bytes_one_msg(self):
         # We send to only 1 partition so we don't have parallel requests to 2
         # nodes for data.
@@ -673,7 +673,7 @@ def test_kafka_consumer_max_bytes_one_msg(self):
         self.assertEqual(len(fetched_msgs), 10)
         consumer.close()
 
-    @kafka_versions('>= 0.10.1')
+    @pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason="Requires KAFKA_VERSION >= 0.10.1")
     def test_kafka_consumer_offsets_for_time(self):
         late_time = int(time.time()) * 1000
         middle_time = late_time - 1000
@@ -727,7 +727,7 @@ def test_kafka_consumer_offsets_for_time(self):
         })
         consumer.close()
 
-    @kafka_versions('>= 0.10.1')
+    @pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason="Requires KAFKA_VERSION >= 0.10.1")
     def test_kafka_consumer_offsets_search_many_partitions(self):
         tp0 = TopicPartition(self.topic, 0)
         tp1 = TopicPartition(self.topic, 1)
@@ -766,15 +766,15 @@ def test_kafka_consumer_offsets_search_many_partitions(self):
         })
         consumer.close()
 
-    @kafka_versions('< 0.10.1')
+    @pytest.mark.skipif(env_kafka_version() >= (0, 10, 1), reason="Requires KAFKA_VERSION < 0.10.1")
     def test_kafka_consumer_offsets_for_time_old(self):
         consumer = self.kafka_consumer()
         tp = TopicPartition(self.topic, 0)
 
         with self.assertRaises(UnsupportedVersionError):
             consumer.offsets_for_times({tp: int(time.time())})
 
-    @kafka_versions('>= 0.10.1')
+    @pytest.mark.skipif(env_kafka_version() < (0, 10, 1), reason="Requires KAFKA_VERSION >= 0.10.1")
     def test_kafka_consumer_offsets_for_times_errors(self):
         consumer = self.kafka_consumer(fetch_max_wait_ms=200,
                                        request_timeout_ms=500)
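Note: `offsets_for_times()` depends on the broker-side timestamp-based offset lookup added in Kafka 0.10.1 (KIP-79), which is why the old-broker test above expects `UnsupportedVersionError`. A minimal usage sketch, assuming a local broker and a hypothetical topic name:

```python
from kafka import KafkaConsumer, TopicPartition

# Look up the earliest offset at-or-after a timestamp (milliseconds).
consumer = KafkaConsumer(bootstrap_servers='localhost:9092')
tp = TopicPartition('my-topic', 0)
ts_ms = 1500000000000
result = consumer.offsets_for_times({tp: ts_ms})
# result[tp] is an OffsetAndTimestamp(offset, timestamp), or None if no
# message at or after ts_ms exists; brokers < 0.10.1 raise
# UnsupportedVersionError instead.
```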