Skip to content

Fix typos #1938

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Nov 8, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion benchmarks/consumer_performance.py
Original file line number Diff line number Diff line change
Expand Up @@ -157,7 +157,7 @@ def get_args_parser():
default=100)
parser.add_argument(
'--consumer-config', type=str, nargs='+', default=(),
help='kafka consumer related configuaration properties like '
help='kafka consumer related configuration properties like '
'bootstrap_servers,client_id etc..')
parser.add_argument(
'--fixture-compression', type=str,
Expand Down
2 changes: 1 addition & 1 deletion kafka/admin/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,7 +103,7 @@ class KafkaAdminClient(object):
should verify that the certificate matches the broker's hostname.
Default: True.
ssl_cafile (str): Optional filename of CA file to use in certificate
veriication. Default: None.
verification. Default: None.
ssl_certfile (str): Optional filename of file in PEM format containing
the client certificate, as well as any CA certificates needed to
establish the certificate's authenticity. Default: None.
Expand Down
2 changes: 1 addition & 1 deletion kafka/client_async.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ class KafkaClient(object):
should verify that the certificate matches the broker's hostname.
Default: True.
ssl_cafile (str): Optional filename of CA file to use in certificate
veriication. Default: None.
verification. Default: None.
ssl_certfile (str): Optional filename of file in PEM format containing
the client certificate, as well as any CA certificates needed to
establish the certificate's authenticity. Default: None.
Expand Down
4 changes: 2 additions & 2 deletions kafka/conn.py
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ def __init__(self, host, port, afi, **configs):
self.config['send_buffer_bytes']))

assert self.config['security_protocol'] in self.SECURITY_PROTOCOLS, (
'security_protcol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))
'security_protocol must be in ' + ', '.join(self.SECURITY_PROTOCOLS))

if self.config['security_protocol'] in ('SSL', 'SASL_SSL'):
assert ssl_available, "Python wasn't built with SSL support"
Expand Down Expand Up @@ -1196,7 +1196,7 @@ def check_version(self, timeout=2, strict=False, topics=[]):
# by looking at ApiVersionResponse
api_versions = self._handle_api_version_response(f.value)
version = self._infer_broker_version_from_api_versions(api_versions)
log.info('Broker version identifed as %s', '.'.join(map(str, version)))
log.info('Broker version identified as %s', '.'.join(map(str, version)))
log.info('Set configuration api_version=%s to skip auto'
' check_version requests on startup', version)
break
Expand Down
4 changes: 2 additions & 2 deletions kafka/consumer/fetcher.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")):
Arguments:
timestamps: {TopicPartition: int} dict with timestamps to fetch
offsets by. -1 for the latest available, -2 for the earliest
available. Otherwise timestamp is treated as epoch miliseconds.
available. Otherwise timestamp is treated as epoch milliseconds.

Returns:
{TopicPartition: (int, int)}: Mapping of partition to
Expand Down Expand Up @@ -291,7 +291,7 @@ def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")):
self._client.poll(future=refresh_future, timeout_ms=remaining_ms)

# Issue #1780
# Recheck partition existance after after a successful metadata refresh
# Recheck partition existence after a successful metadata refresh
if refresh_future.succeeded() and isinstance(future.exception, Errors.StaleMetadata):
log.debug("Stale metadata was raised, and we now have an updated metadata. Rechecking partition existence")
unknown_partition = future.exception.args[0] # TopicPartition from StaleMetadata
Expand Down
2 changes: 1 addition & 1 deletion kafka/coordinator/consumer.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ def __init__(self, client, subscription, metrics, **configs):
adjusted even lower to control the expected time for normal
rebalances. Default: 3000
session_timeout_ms (int): The timeout used to detect failures when
using Kafka's group managementment facilities. Default: 30000
using Kafka's group management facilities. Default: 30000
retry_backoff_ms (int): Milliseconds to backoff when retrying on
errors. Default: 100.
exclude_internal_topics (bool): Whether records from internal topics
Expand Down
2 changes: 1 addition & 1 deletion kafka/producer/kafka.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,7 +155,7 @@ class KafkaProducer(object):
'linger' for the specified time waiting for more records to show
up. This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
would have the effect of reducing the number of requests sent but
would add up to 5ms of latency to records sent in the absense of
would add up to 5ms of latency to records sent in the absence of
load. Default: 0.
partitioner (callable): Callable used to determine which partition
each message is assigned to. Called (after key serialization):
Expand Down
2 changes: 1 addition & 1 deletion kafka/record/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ def decode_varint(buffer, pos=0):
on how those can be produced.

Arguments:
buffer (bytearry): buffer to read from.
buffer (bytearray): buffer to read from.
pos (int): optional position to read from

Returns:
Expand Down
2 changes: 1 addition & 1 deletion test/record/test_records.py
Original file line number Diff line number Diff line change
Expand Up @@ -195,7 +195,7 @@ def test_memory_records_builder(magic, compression_type):
size_before_close = builder.size_in_bytes()
assert size_before_close == sum(msg_sizes) + base_size

# Size should remain the same after closing. No traling bytes
# Size should remain the same after closing. No trailing bytes
builder.close()
assert builder.compression_rate() > 0
expected_size = size_before_close * builder.compression_rate()
Expand Down