Use absolute imports everywhere #1362

Merged: 1 commit, merged Feb 6, 2018
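The change applied throughout this PR is mechanical: every explicit relative import inside the `kafka` package is rewritten as an absolute import rooted at `kafka`. A minimal before/after illustration of the pattern, taken from the kafka/__init__.py diff below:

# Before: explicit relative import, resolved against the importing module's package
from .version import __version__

# After: absolute import, unambiguous and grep-able from anywhere in the tree
from kafka.version import __version__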
2 changes: 1 addition & 1 deletion kafka/__init__.py
@@ -1,7 +1,7 @@
 from __future__ import absolute_import

 __title__ = 'kafka'
-from .version import __version__
+from kafka.version import __version__
 __author__ = 'Dana Powers'
 __license__ = 'Apache License 2.0'
 __copyright__ = 'Copyright 2016 Dana Powers, David Arthur, and Contributors'
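A note on the `from __future__ import absolute_import` line that already heads these modules: on Python 2, a bare `import json` inside a package would first match a sibling module (a hypothetical kafka/json.py) before the stdlib. The future import opts into Python 3 semantics, where bare imports are always absolute and siblings must be named via their full package path, which is what makes the style in this PR safe on both interpreters. A minimal sketch (module names hypothetical, and kafka-python assumed importable):

from __future__ import absolute_import

import json               # always the stdlib json, even if kafka/json.py existed
from kafka import errors  # a sibling must now be named through the package root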
24 changes: 12 additions & 12 deletions kafka/client_async.py
@@ -13,26 +13,26 @@
     import selectors  # pylint: disable=import-error
 except ImportError:
     # vendored backport module
-    from .vendor import selectors34 as selectors
+    from kafka.vendor import selectors34 as selectors

 import socket
 import time

 from kafka.vendor import six

-from .cluster import ClusterMetadata
-from .conn import BrokerConnection, ConnectionStates, collect_hosts, get_ip_port_afi
-from . import errors as Errors
-from .future import Future
-from .metrics import AnonMeasurable
-from .metrics.stats import Avg, Count, Rate
-from .metrics.stats.rate import TimeUnit
-from .protocol.metadata import MetadataRequest
-from .util import Dict, WeakMethod
+from kafka.cluster import ClusterMetadata
+from kafka.conn import BrokerConnection, ConnectionStates, collect_hosts, get_ip_port_afi
+from kafka import errors as Errors
+from kafka.future import Future
+from kafka.metrics import AnonMeasurable
+from kafka.metrics.stats import Avg, Count, Rate
+from kafka.metrics.stats.rate import TimeUnit
+from kafka.protocol.metadata import MetadataRequest
+from kafka.util import Dict, WeakMethod
 # Although this looks unused, it actually monkey-patches socket.socketpair()
 # and should be left in as long as we're using socket.socketpair() in this file
-from .vendor import socketpair
-from .version import __version__
+from kafka.vendor import socketpair
+from kafka.version import __version__

 if six.PY2:
     ConnectionError = None
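The comment above `kafka.vendor.socketpair` is worth dwelling on: merely importing that module has the side effect of defining socket.socketpair() on platforms that lack it, which is why the import must survive even though the name is never referenced. A minimal sketch of that import-time patching idiom, simplified and not the vendored module's actual code:

import socket

def _socketpair_compat():
    # Emulate socketpair() with a short-lived loopback TCP connection.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind(('127.0.0.1', 0))
    server.listen(1)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect(server.getsockname())
    peer, _ = server.accept()
    server.close()
    return client, peer

# The patch happens at import time, so `from kafka.vendor import socketpair`
# is enough on its own; the imported name itself never needs to be used.
if not hasattr(socket, 'socketpair'):
    socket.socketpair = _socketpair_compat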
6 changes: 3 additions & 3 deletions kafka/cluster.py
@@ -8,9 +8,9 @@

 from kafka.vendor import six

-from . import errors as Errors
-from .future import Future
-from .structs import BrokerMetadata, PartitionMetadata, TopicPartition
+from kafka import errors as Errors
+from kafka.future import Future
+from kafka.structs import BrokerMetadata, PartitionMetadata, TopicPartition

 log = logging.getLogger(__name__)

6 changes: 3 additions & 3 deletions kafka/conn.py
@@ -12,7 +12,7 @@
     import selectors  # pylint: disable=import-error
 except ImportError:
     # vendored backport module
-    from .vendor import selectors34 as selectors
+    from kafka.vendor import selectors34 as selectors

 import socket
 import struct
@@ -857,8 +857,8 @@ def check_version(self, timeout=2, strict=False):
         # vanilla MetadataRequest. If the server did not recognize the first
         # request, both will be failed with a ConnectionError that wraps
         # socket.error (32, 54, or 104)
-        from .protocol.admin import ApiVersionRequest, ListGroupsRequest
-        from .protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest
+        from kafka.protocol.admin import ApiVersionRequest, ListGroupsRequest
+        from kafka.protocol.commit import OffsetFetchRequest, GroupCoordinatorRequest

         # Socket errors are logged as exceptions and can alarm users. Mute them
         from logging import Filter
6 changes: 3 additions & 3 deletions kafka/consumer/__init__.py
@@ -1,8 +1,8 @@
 from __future__ import absolute_import

-from .simple import SimpleConsumer
-from .multiprocess import MultiProcessConsumer
-from .group import KafkaConsumer
+from kafka.consumer.simple import SimpleConsumer
+from kafka.consumer.multiprocess import MultiProcessConsumer
+from kafka.consumer.group import KafkaConsumer

 __all__ = [
     'SimpleConsumer', 'MultiProcessConsumer', 'KafkaConsumer'
6 changes: 3 additions & 3 deletions kafka/consumer/multiprocess.py
@@ -8,15 +8,15 @@

 from kafka.vendor.six.moves import queue  # pylint: disable=import-error

-from ..common import KafkaError
-from .base import (
+from kafka.common import KafkaError
+from kafka.consumer.base import (
     Consumer,
     AUTO_COMMIT_MSG_COUNT, AUTO_COMMIT_INTERVAL,
     NO_MESSAGES_WAIT_TIME_SECONDS,
     FULL_QUEUE_WAIT_TIME_SECONDS,
     MAX_BACKOFF_SECONDS,
 )
-from .simple import SimpleConsumer
+from kafka.consumer.simple import SimpleConsumer


 log = logging.getLogger(__name__)
4 changes: 2 additions & 2 deletions kafka/consumer/simple.py
@@ -12,7 +12,7 @@
 from kafka.vendor import six
 from kafka.vendor.six.moves import queue  # pylint: disable=import-error

-from .base import (
+from kafka.consumer.base import (
     Consumer,
     FETCH_DEFAULT_BLOCK_TIMEOUT,
     AUTO_COMMIT_MSG_COUNT,
@@ -24,7 +24,7 @@
     ITER_TIMEOUT_SECONDS,
     NO_MESSAGES_WAIT_TIME_SECONDS
 )
-from ..common import (
+from kafka.common import (
     FetchRequestPayload, KafkaError, OffsetRequestPayload,
     ConsumerFetchSizeTooSmall,
     UnknownTopicOrPartitionError, NotLeaderForPartitionError,
4 changes: 2 additions & 2 deletions kafka/coordinator/assignors/range.py
@@ -5,8 +5,8 @@

 from kafka.vendor import six

-from .abstract import AbstractPartitionAssignor
-from ..protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment
+from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor
+from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment

 log = logging.getLogger(__name__)

6 changes: 3 additions & 3 deletions kafka/coordinator/assignors/roundrobin.py
@@ -6,9 +6,9 @@

 from kafka.vendor import six

-from .abstract import AbstractPartitionAssignor
-from ...common import TopicPartition
-from ..protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment
+from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor
+from kafka.common import TopicPartition
+from kafka.coordinator.protocol import ConsumerProtocolMemberMetadata, ConsumerProtocolMemberAssignment

 log = logging.getLogger(__name__)

14 changes: 7 additions & 7 deletions kafka/coordinator/base.py
@@ -10,13 +10,13 @@

 from kafka.vendor import six

-from .heartbeat import Heartbeat
-from .. import errors as Errors
-from ..future import Future
-from ..metrics import AnonMeasurable
-from ..metrics.stats import Avg, Count, Max, Rate
-from ..protocol.commit import GroupCoordinatorRequest, OffsetCommitRequest
-from ..protocol.group import (HeartbeatRequest, JoinGroupRequest,
+from kafka.coordinator.heartbeat import Heartbeat
+from kafka import errors as Errors
+from kafka.future import Future
+from kafka.metrics import AnonMeasurable
+from kafka.metrics.stats import Avg, Count, Max, Rate
+from kafka.protocol.commit import GroupCoordinatorRequest, OffsetCommitRequest
+from kafka.protocol.group import (HeartbeatRequest, JoinGroupRequest,
                                   LeaveGroupRequest, SyncGroupRequest)

 log = logging.getLogger('kafka.coordinator')
22 changes: 11 additions & 11 deletions kafka/coordinator/consumer.py
@@ -7,17 +7,17 @@

 from kafka.vendor import six

-from .base import BaseCoordinator, Generation
-from .assignors.range import RangePartitionAssignor
-from .assignors.roundrobin import RoundRobinPartitionAssignor
-from .protocol import ConsumerProtocol
-from .. import errors as Errors
-from ..future import Future
-from ..metrics import AnonMeasurable
-from ..metrics.stats import Avg, Count, Max, Rate
-from ..protocol.commit import OffsetCommitRequest, OffsetFetchRequest
-from ..structs import OffsetAndMetadata, TopicPartition
-from ..util import WeakMethod
+from kafka.coordinator.base import BaseCoordinator, Generation
+from kafka.coordinator.assignors.range import RangePartitionAssignor
+from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor
+from kafka.coordinator.protocol import ConsumerProtocol
+from kafka import errors as Errors
+from kafka.future import Future
+from kafka.metrics import AnonMeasurable
+from kafka.metrics.stats import Avg, Count, Max, Rate
+from kafka.protocol.commit import OffsetCommitRequest, OffsetFetchRequest
+from kafka.structs import OffsetAndMetadata, TopicPartition
+from kafka.util import WeakMethod


 log = logging.getLogger(__name__)
16 changes: 8 additions & 8 deletions kafka/metrics/__init__.py
@@ -1,13 +1,13 @@
 from __future__ import absolute_import

-from .compound_stat import NamedMeasurable
-from .dict_reporter import DictReporter
-from .kafka_metric import KafkaMetric
-from .measurable import AnonMeasurable
-from .metric_config import MetricConfig
-from .metric_name import MetricName
-from .metrics import Metrics
-from .quota import Quota
+from kafka.metrics.compound_stat import NamedMeasurable
+from kafka.metrics.dict_reporter import DictReporter
+from kafka.metrics.kafka_metric import KafkaMetric
+from kafka.metrics.measurable import AnonMeasurable
+from kafka.metrics.metric_config import MetricConfig
+from kafka.metrics.metric_name import MetricName
+from kafka.metrics.metrics import Metrics
+from kafka.metrics.quota import Quota

 __all__ = [
     'AnonMeasurable', 'DictReporter', 'KafkaMetric', 'MetricConfig',
20 changes: 10 additions & 10 deletions kafka/metrics/stats/__init__.py
@@ -1,15 +1,15 @@
 from __future__ import absolute_import

-from .avg import Avg
-from .count import Count
-from .histogram import Histogram
-from .max_stat import Max
-from .min_stat import Min
-from .percentile import Percentile
-from .percentiles import Percentiles
-from .rate import Rate
-from .sensor import Sensor
-from .total import Total
+from kafka.metrics.stats.avg import Avg
+from kafka.metrics.stats.count import Count
+from kafka.metrics.stats.histogram import Histogram
+from kafka.metrics.stats.max_stat import Max
+from kafka.metrics.stats.min_stat import Min
+from kafka.metrics.stats.percentile import Percentile
+from kafka.metrics.stats.percentiles import Percentiles
+from kafka.metrics.stats.rate import Rate
+from kafka.metrics.stats.sensor import Sensor
+from kafka.metrics.stats.total import Total

 __all__ = [
     'Avg', 'Count', 'Histogram', 'Max', 'Min', 'Percentile', 'Percentiles',
6 changes: 3 additions & 3 deletions kafka/partitioner/__init__.py
@@ -1,8 +1,8 @@
 from __future__ import absolute_import

-from .default import DefaultPartitioner
-from .hashed import HashedPartitioner, Murmur2Partitioner, LegacyPartitioner
-from .roundrobin import RoundRobinPartitioner
+from kafka.partitioner.default import DefaultPartitioner
+from kafka.partitioner.hashed import HashedPartitioner, Murmur2Partitioner, LegacyPartitioner
+from kafka.partitioner.roundrobin import RoundRobinPartitioner

 __all__ = [
     'DefaultPartitioner', 'RoundRobinPartitioner', 'HashedPartitioner',
2 changes: 1 addition & 1 deletion kafka/partitioner/default.py
@@ -2,7 +2,7 @@

 import random

-from .hashed import murmur2
+from kafka.partitioner.hashed import murmur2


 class DefaultPartitioner(object):
2 changes: 1 addition & 1 deletion kafka/partitioner/hashed.py
@@ -2,7 +2,7 @@

 from kafka.vendor import six

-from .base import Partitioner
+from kafka.partitioner.base import Partitioner


 class Murmur2Partitioner(Partitioner):
2 changes: 1 addition & 1 deletion kafka/partitioner/roundrobin.py
@@ -1,6 +1,6 @@
 from __future__ import absolute_import

-from .base import Partitioner
+from kafka.partitioner.base import Partitioner


 class RoundRobinPartitioner(Partitioner):
6 changes: 3 additions & 3 deletions kafka/producer/__init__.py
@@ -1,8 +1,8 @@
 from __future__ import absolute_import

-from .kafka import KafkaProducer
-from .simple import SimpleProducer
-from .keyed import KeyedProducer
+from kafka.producer.kafka import KafkaProducer
+from kafka.producer.simple import SimpleProducer
+from kafka.producer.keyed import KeyedProducer

 __all__ = [
     'KafkaProducer',
2 changes: 1 addition & 1 deletion kafka/producer/buffer.py
@@ -5,7 +5,7 @@
 import threading
 import time

-from ..metrics.stats import Rate
+from kafka.metrics.stats import Rate

 import kafka.errors as Errors

4 changes: 2 additions & 2 deletions kafka/producer/future.py
@@ -3,8 +3,8 @@
 import collections
 import threading

-from .. import errors as Errors
-from ..future import Future
+from kafka import errors as Errors
+from kafka.future import Future


 class FutureProduceResult(Future):
28 changes: 14 additions & 14 deletions kafka/producer/kafka.py
@@ -8,20 +8,20 @@
 import time
 import weakref

-from ..vendor import six
-
-from .. import errors as Errors
-from ..client_async import KafkaClient, selectors
-from ..codec import has_gzip, has_snappy, has_lz4
-from ..metrics import MetricConfig, Metrics
-from ..partitioner.default import DefaultPartitioner
-from ..record.default_records import DefaultRecordBatchBuilder
-from ..record.legacy_records import LegacyRecordBatchBuilder
-from ..serializer import Serializer
-from ..structs import TopicPartition
-from .future import FutureRecordMetadata, FutureProduceResult
-from .record_accumulator import AtomicInteger, RecordAccumulator
-from .sender import Sender
+from kafka.vendor import six
+
+from kafka import errors as Errors
+from kafka.client_async import KafkaClient, selectors
+from kafka.codec import has_gzip, has_snappy, has_lz4
+from kafka.metrics import MetricConfig, Metrics
+from kafka.partitioner.default import DefaultPartitioner
+from kafka.record.default_records import DefaultRecordBatchBuilder
+from kafka.record.legacy_records import LegacyRecordBatchBuilder
+from kafka.serializer import Serializer
+from kafka.structs import TopicPartition
+from kafka.producer.future import FutureRecordMetadata, FutureProduceResult
+from kafka.producer.record_accumulator import AtomicInteger, RecordAccumulator
+from kafka.producer.sender import Sender


 log = logging.getLogger(__name__)
4 changes: 2 additions & 2 deletions kafka/producer/keyed.py
@@ -3,8 +3,8 @@
 import logging
 import warnings

-from .base import Producer
-from ..partitioner import HashedPartitioner
+from kafka.producer.base import Producer
+from kafka.partitioner import HashedPartitioner


 log = logging.getLogger(__name__)
8 changes: 4 additions & 4 deletions kafka/producer/record_accumulator.py
@@ -6,10 +6,10 @@
 import threading
 import time

-from .. import errors as Errors
-from .buffer import SimpleBufferPool
-from .future import FutureRecordMetadata, FutureProduceResult
-from ..structs import TopicPartition
+from kafka import errors as Errors
+from kafka.producer.buffer import SimpleBufferPool
+from kafka.producer.future import FutureRecordMetadata, FutureProduceResult
+from kafka.structs import TopicPartition
 from kafka.record.memory_records import MemoryRecordsBuilder
 from kafka.record.legacy_records import LegacyRecordBatchBuilder

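A hypothetical follow-up, not part of this PR: once every module uses absolute imports, a small check can keep relative imports from creeping back in. A sketch under assumed paths and conventions (the regex, the `kafka` root, and the vendor exemption are all assumptions):

import os
import re

# Matches any explicit relative import such as "from . import x" or "from ..y import z".
RELATIVE_IMPORT = re.compile(r'^\s*from\s+\.', re.MULTILINE)

def find_relative_imports(root='kafka'):
    """Return paths of .py files under root that still use relative imports."""
    offenders = []
    for dirpath, _, filenames in os.walk(root):
        # Vendored third-party code is exempt; it is not ours to rewrite.
        if 'vendor' in dirpath.split(os.sep):
            continue
        for name in filenames:
            if name.endswith('.py'):
                path = os.path.join(dirpath, name)
                with open(path) as f:
                    if RELATIVE_IMPORT.search(f.read()):
                        offenders.append(path)
    return offenders

if __name__ == '__main__':
    assert not find_relative_imports(), 'relative imports found'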