
Commit b7217b7

Add default resources for new kafka server fixtures (#2484)
1 parent 6b3a905 commit b7217b7

File tree: 6 files changed, +226 −2 lines

Makefile

Lines changed: 1 addition & 1 deletion
@@ -89,7 +89,7 @@ servers/dist/jakarta.xml.bind-api-2.3.3.jar:
 servers/%/kafka-bin: servers/dist/$$(call kafka_artifact_name,$$*) | servers/dist
 	@echo "Extracting kafka $* binaries from $<"
 	if [ -d "$@" ]; then rm -rf $@.bak; mv $@ $@.bak; fi
-	mkdir $@
+	mkdir -p $@
 	tar xzvf $< -C $@ --strip-components 1
 	if [[ "$*" < "1" ]]; then make servers/patch-libs/$*; fi
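The fix swaps mkdir for mkdir -p, so the target no longer fails when the extraction directory already exists (e.g. on a re-run after the .bak rotation). For illustration only, a rough Python equivalent of the recipe's rotate-then-recreate logic (hypothetical helper, not part of this commit):

import os
import shutil

def prepare_extraction_dir(path):
    # Rotate any previous extraction to `<path>.bak`, as the recipe does.
    backup = path + ".bak"
    if os.path.isdir(path):
        shutil.rmtree(backup, ignore_errors=True)
        os.rename(path, backup)
    # exist_ok=True mirrors `mkdir -p`: tolerate an existing directory
    # instead of erroring out.
    os.makedirs(path, exist_ok=True)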

New file: Kafka broker properties template

Lines changed: 171 additions & 0 deletions
@@ -0,0 +1,171 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# see kafka.server.KafkaConfig for additional details and defaults

############################# Server Basics #############################

# The id of the broker. This must be set to a unique integer for each broker.
broker.id={broker_id}

############################# Socket Server Settings #############################

# The address the socket server listens on. It will get the value returned from
# java.net.InetAddress.getCanonicalHostName() if not configured.
#   FORMAT:
#     listeners = listener_name://host_name:port
#   EXAMPLE:
#     listeners = PLAINTEXT://your.host.name:9092
listeners={transport}://{host}:{port}
security.inter.broker.protocol={transport}

{sasl_config}

ssl.keystore.location={ssl_dir}/kafka.server.keystore.jks
ssl.keystore.password=foobar
ssl.key.password=foobar
ssl.truststore.location={ssl_dir}/kafka.server.truststore.jks
ssl.truststore.password=foobar

authorizer.class.name=kafka.security.authorizer.AclAuthorizer
allow.everyone.if.no.acl.found=true

# The port the socket server listens on
#port=9092

# Hostname and port the broker will advertise to producers and consumers. If not set,
# it uses the value for "listeners" if configured. Otherwise, it will use the value
# returned from java.net.InetAddress.getCanonicalHostName().
#advertised.listeners=PLAINTEXT://your.host.name:9092

# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL

# The number of threads that the server uses for receiving requests from the network and sending responses to the network
num.network.threads=3

# The number of threads that the server uses for processing requests, which may include disk I/O
num.io.threads=8

# The send buffer (SO_SNDBUF) used by the socket server
socket.send.buffer.bytes=102400

# The receive buffer (SO_RCVBUF) used by the socket server
socket.receive.buffer.bytes=102400

# The maximum size of a request that the socket server will accept (protection against OOM)
socket.request.max.bytes=104857600


############################# Log Basics #############################

# A comma separated list of directories under which to store log files
log.dirs={tmp_dir}/data

# The default number of log partitions per topic. More partitions allow greater
# parallelism for consumption, but this will also result in more files across
# the brokers.
num.partitions={partitions}
default.replication.factor={replicas}

## Short Replica Lag -- Drops failed brokers out of ISR
replica.lag.time.max.ms=1000
replica.socket.timeout.ms=1000

# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
# This value is recommended to be increased for installations with data dirs located in RAID array.
num.recovery.threads.per.data.dir=1

############################# Internal Topic Settings #############################
# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3.
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
transaction.state.log.min.isr=1

############################# Log Flush Policy #############################

# Messages are immediately written to the filesystem but by default we only fsync() to sync
# the OS cache lazily. The following configurations control the flush of data to disk.
# There are a few important trade-offs here:
#    1. Durability: Unflushed data may be lost if you are not using replication.
#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
# The settings below allow one to configure the flush policy to flush data after a period of time or
# every N messages (or both). This can be done globally and overridden on a per-topic basis.

# The number of messages to accept before forcing a flush of data to disk
#log.flush.interval.messages=10000

# The maximum amount of time a message can sit in a log before we force a flush
#log.flush.interval.ms=1000

############################# Log Retention Policy #############################

# The following configurations control the disposal of log segments. The policy can
# be set to delete segments after a period of time, or after a given size has accumulated.
# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
# from the end of the log.

# The minimum age of a log file to be eligible for deletion due to age
log.retention.hours=168

# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
#log.retention.bytes=1073741824

# The maximum size of a log segment file. When this size is reached a new log segment will be created.
log.segment.bytes=1073741824

# The interval at which log segments are checked to see if they can be deleted according
# to the retention policies
log.retention.check.interval.ms=300000

# By default the log cleaner is disabled and the log retention policy will default to just delete segments after their retention expires.
# If log.cleaner.enable=true is set the cleaner will be enabled and individual logs can then be marked for log compaction.
log.cleaner.enable=false

# tune down offset topics to reduce setup time in tests
offsets.commit.timeout.ms=500
offsets.topic.num.partitions=2
offsets.topic.replication.factor=1

# Allow shorter session timeouts for tests
group.min.session.timeout.ms=1000


############################# Zookeeper #############################

# Zookeeper connection string (see zookeeper docs for details).
# This is a comma separated host:port pairs, each corresponding to a zk
# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
# You can also append an optional chroot string to the urls to specify the
# root directory for all kafka znodes.
zookeeper.connect={zk_host}:{zk_port}/{zk_chroot}

# Timeout in ms for connecting to zookeeper
zookeeper.connection.timeout.ms=30000
# We want to expire kafka broker sessions quickly when brokers die b/c we restart them quickly
zookeeper.session.timeout.ms=500


############################# Group Coordinator Settings #############################

# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
# The default value for this is 3 seconds.
# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
group.initial.rebalance.delay.ms=0
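The {broker_id}, {transport}, {host}, {port}, {sasl_config}, {ssl_dir}, {tmp_dir}, {partitions}, {replicas}, {zk_host}, {zk_port} and {zk_chroot} fields look like Python str.format placeholders that the fixture fills in when it writes a concrete broker config. A minimal sketch of that rendering step, with example values only (the fixture's actual rendering helper and the template's on-disk filename are not shown in this diff):

# Hypothetical rendering step; filename and values are illustrative.
with open("kafka.properties") as handle:
    template = handle.read()

rendered = template.format(
    broker_id=0,
    transport="PLAINTEXT",
    host="127.0.0.1",
    port=9092,
    sasl_config="",              # empty unless a SASL transport is in use
    ssl_dir="/tmp/test-ssl",
    tmp_dir="/tmp/kafka-test-0",
    partitions=2,
    replicas=1,
    zk_host="127.0.0.1",
    zk_port=2181,
    zk_chroot="kafka-test",
)

with open("/tmp/kafka-test-0/kafka.properties", "w") as handle:
    handle.write(rendered)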
New file: JAAS configuration template

Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
KafkaServer {{
    {jaas_config}
}};
Client {{}};
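The doubled braces are the giveaway that this template is rendered with Python's str.format: {{ and }} come out as literal { and }, while {jaas_config} is substituted. A quick demonstration with a made-up login-module stanza (values illustrative only; the fixture supplies the real module and credentials):

template = (
    "KafkaServer {{\n"
    "    {jaas_config}\n"
    "}};\n"
    "Client {{}};\n"
)

jaas = ('org.apache.kafka.common.security.plain.PlainLoginModule required\n'
        '    username="alice" password="alice-secret";')
print(template.format(jaas_config=jaas))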
New file: log4j.properties template

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=INFO, stdout, logfile

log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n

log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=${kafka.logs.dir}/server.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=[%d] %p %m (%c)%n
New file: ZooKeeper properties template

Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# the directory where the snapshot is stored.
dataDir={tmp_dir}
# the port at which the clients will connect
clientPort={port}
clientPortAddress={host}
# disable the per-ip limit on the number of connections since this is a non-production config
maxClientCnxns=0

test/fixtures.py

Lines changed: 4 additions & 1 deletion
@@ -111,7 +111,10 @@ def download_official_distribution(cls,
 
     @classmethod
     def test_resource(cls, filename):
-        return os.path.join(cls.project_root, "servers", cls.kafka_version, "resources", filename)
+        path = os.path.join(cls.project_root, "servers", cls.kafka_version, "resources", filename)
+        if os.path.isfile(path):
+            return path
+        return os.path.join(cls.project_root, "servers", "resources", "default", filename)
 
     @classmethod
     def kafka_run_class_args(cls, *args):
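test_resource now prefers a version-specific file under servers/<kafka_version>/resources/ and falls back to the shared defaults added by this commit, so a new Kafka version only needs override files where its config actually differs. A standalone sketch of the same lookup order (paths and the example filename are illustrative):

import os

def resolve_resource(project_root, kafka_version, filename):
    # A version-specific resource wins if it exists...
    override = os.path.join(project_root, "servers", kafka_version,
                            "resources", filename)
    if os.path.isfile(override):
        return override
    # ...otherwise fall back to the shared default resource.
    return os.path.join(project_root, "servers", "resources", "default",
                        filename)

# e.g. resolve_resource("/repo", "2.6.0", "kafka.properties")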
