Created: September 3, 2019 16:16
Kafka installation error log with docker-machine and the Parallels driver
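For context: this log comes from the Confluent cp-kafka 5.1.2 image running inside a boot2docker VM created with docker-machine's Parallels driver (note HOSTNAME=prl-dev and os.version=4.14.134-boot2docker below). The broker starts, passes its preflight checks, and then shuts itself down while loading /var/lib/kafka/data. A minimal reproduction presumably looks like the following sketch; the machine name, the host path, and the --net=host networking are assumptions, and only the KAFKA_* values are taken from the log:

    # create a boot2docker VM with the third-party Parallels driver
    docker-machine create --driver parallels prl-dev
    eval "$(docker-machine env prl-dev)"

    # run cp-kafka with the data dir bind-mounted from the host; under
    # docker-machine this mount goes through a Parallels shared folder,
    # which is what triggers the mmap failure at the end of the log
    docker run -d --net=host \
      -e KAFKA_BROKER_ID=1 \
      -e KAFKA_ZOOKEEPER_CONNECT=localhost:32181 \
      -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:39092 \
      -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
      -v /Users/me/kafka-data:/var/lib/kafka/data \
      confluentinc/cp-kafka:5.1.2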
===> ENV Variables ...
ALLOW_UNSIGNED=false
COMPONENT=kafka
CONFLUENT_DEB_VERSION=1
CONFLUENT_MAJOR_VERSION=5
CONFLUENT_MINOR_VERSION=1
CONFLUENT_MVN_LABEL=
CONFLUENT_PATCH_VERSION=2
CONFLUENT_PLATFORM_LABEL=
CONFLUENT_VERSION=5.1.2
CUB_CLASSPATH=/etc/confluent/docker/docker-utils.jar
HOME=/root
HOSTNAME=prl-dev
KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:39092
KAFKA_BROKER_ID=1
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1
KAFKA_VERSION=2.1.1cp1
KAFKA_ZOOKEEPER_CONNECT=localhost:32181
LANG=C.UTF-8
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
PWD=/
PYTHON_PIP_VERSION=8.1.2
PYTHON_VERSION=2.7.9-1
SCALA_VERSION=2.11
SHLVL=1
TERM=xterm
ZULU_OPENJDK_VERSION=8=8.30.0.1
_=/usr/bin/env
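A note on the KAFKA_* variables above: the cp-kafka image's configure step rewrites each of them into a server.properties entry by dropping the prefix, lowercasing, and replacing underscores with dots, which is why the same values reappear in the KafkaConfig dump further down. A rough, hypothetical sketch of that mapping (not the image's actual script):

    # to_prop is a hypothetical helper illustrating the env-to-property mapping
    to_prop() {
      echo "${1#KAFKA_}" | tr 'A-Z_' 'a-z.'
    }
    to_prop KAFKA_ADVERTISED_LISTENERS   # prints: advertised.listeners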
===> User
uid=0(root) gid=0(root) groups=0(root)
===> Configuring ...
===> Running preflight checks ...
===> Check if /var/lib/kafka/data is writable ...
===> Check if Zookeeper is healthy ...
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 00:39 GMT
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:host.name=prl-dev
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.version=1.8.0_172
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.vendor=Azul Systems, Inc.
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.class.path=/etc/confluent/docker/docker-utils.jar
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.io.tmpdir=/tmp
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:java.compiler=<NA>
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.name=Linux
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.arch=amd64
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:os.version=4.14.134-boot2docker
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.name=root
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.home=/root
[main] INFO org.apache.zookeeper.ZooKeeper - Client environment:user.dir=/
[main] INFO org.apache.zookeeper.ZooKeeper - Initiating client connection, connectString=localhost:32181 sessionTimeout=40000 watcher=io.confluent.admin.utils.ZookeeperConnectionWatcher@b1bc7ed
[main-SendThread(localhost:32181)] INFO org.apache.zookeeper.ClientCnxn - Opening socket connection to server localhost/127.0.0.1:32181. Will not attempt to authenticate using SASL (unknown error)
[main-SendThread(localhost:32181)] INFO org.apache.zookeeper.ClientCnxn - Socket connection established to localhost/127.0.0.1:32181, initiating session
[main-SendThread(localhost:32181)] INFO org.apache.zookeeper.ClientCnxn - Session establishment complete on server localhost/127.0.0.1:32181, sessionid = 0x100019eba840002, negotiated timeout = 40000
[main] INFO org.apache.zookeeper.ZooKeeper - Session: 0x100019eba840002 closed
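The clean session handshake above means the "Check if Zookeeper is healthy" preflight passed, so ZooKeeper itself is not the problem here. That check is run by Confluent's docker-utils (see CUB_CLASSPATH in the environment dump); roughly the same probe can be run by hand with the image's cub tool, where 40 is the timeout in seconds matching the sessionTimeout above:

    # manual ZooKeeper readiness probe (localhost works because of host networking)
    docker run --rm --net=host confluentinc/cp-kafka:5.1.2 \
      cub zk-ready localhost:32181 40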
===> Launching ...
===> Launching kafka ...
[2019-09-03 16:12:48,052] INFO Registered kafka:type=kafka.Log4jController MBean (kafka.utils.Log4jControllerRegistration$)
[2019-09-03 16:12:48,296] INFO KafkaConfig values:
advertised.host.name = null
advertised.listeners = PLAINTEXT://localhost:39092
advertised.port = null
alter.config.policy.class.name = null
alter.log.dirs.replication.quota.window.num = 11
alter.log.dirs.replication.quota.window.size.seconds = 1
authorizer.class.name =
auto.create.topics.enable = true
auto.leader.rebalance.enable = true
background.threads = 10
broker.id = 1
broker.id.generation.enable = true
broker.interceptor.class = class org.apache.kafka.server.interceptor.DefaultBrokerInterceptor
broker.rack = null
client.quota.callback.class = null
compression.type = producer
connection.failed.authentication.delay.ms = 100
connections.max.idle.ms = 600000
controlled.shutdown.enable = true
controlled.shutdown.max.retries = 3
controlled.shutdown.retry.backoff.ms = 5000
controller.socket.timeout.ms = 30000
create.topic.policy.class.name = null
default.replication.factor = 1
delegation.token.expiry.check.interval.ms = 3600000
delegation.token.expiry.time.ms = 86400000
delegation.token.master.key = null
delegation.token.max.lifetime.ms = 604800000
delete.records.purgatory.purge.interval.requests = 1
delete.topic.enable = true
fetch.purgatory.purge.interval.requests = 1000
group.initial.rebalance.delay.ms = 3000
group.max.session.timeout.ms = 300000
group.min.session.timeout.ms = 6000
host.name =
inter.broker.listener.name = null
inter.broker.protocol.version = 2.1-IV2
kafka.metrics.polling.interval.secs = 10
kafka.metrics.reporters = []
leader.imbalance.check.interval.seconds = 300
leader.imbalance.per.broker.percentage = 10
listener.security.protocol.map = PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
listeners = PLAINTEXT://0.0.0.0:39092
log.cleaner.backoff.ms = 15000
log.cleaner.dedupe.buffer.size = 134217728
log.cleaner.delete.retention.ms = 86400000
log.cleaner.enable = true
log.cleaner.io.buffer.load.factor = 0.9
log.cleaner.io.buffer.size = 524288
log.cleaner.io.max.bytes.per.second = 1.7976931348623157E308
log.cleaner.min.cleanable.ratio = 0.5
log.cleaner.min.compaction.lag.ms = 0
log.cleaner.threads = 1
log.cleanup.policy = [delete]
log.dir = /tmp/kafka-logs
log.dirs = /var/lib/kafka/data
log.flush.interval.messages = 9223372036854775807
log.flush.interval.ms = null
log.flush.offset.checkpoint.interval.ms = 60000
log.flush.scheduler.interval.ms = 9223372036854775807
log.flush.start.offset.checkpoint.interval.ms = 60000
log.index.interval.bytes = 4096
log.index.size.max.bytes = 10485760
log.message.downconversion.enable = true
log.message.format.version = 2.1-IV2
log.message.timestamp.difference.max.ms = 9223372036854775807
log.message.timestamp.type = CreateTime
log.preallocate = false
log.retention.bytes = -1
log.retention.check.interval.ms = 300000
log.retention.hours = 168
log.retention.minutes = null
log.retention.ms = null
log.roll.hours = 168
log.roll.jitter.hours = 0
log.roll.jitter.ms = null
log.roll.ms = null
log.segment.bytes = 1073741824
log.segment.delete.delay.ms = 60000
max.connections.per.ip = 2147483647
max.connections.per.ip.overrides =
max.incremental.fetch.session.cache.slots = 1000
message.max.bytes = 1000012
metric.reporters = []
metrics.num.samples = 2
metrics.recording.level = INFO
metrics.sample.window.ms = 30000
min.insync.replicas = 1
num.io.threads = 8
num.network.threads = 3
num.partitions = 1
num.recovery.threads.per.data.dir = 1
num.replica.alter.log.dirs.threads = null
num.replica.fetchers = 1
offset.metadata.max.bytes = 4096
offsets.commit.required.acks = -1
offsets.commit.timeout.ms = 5000
offsets.load.buffer.size = 5242880
offsets.retention.check.interval.ms = 600000
offsets.retention.minutes = 10080
offsets.topic.compression.codec = 0
offsets.topic.num.partitions = 50
offsets.topic.replication.factor = 1
offsets.topic.segment.bytes = 104857600
password.encoder.cipher.algorithm = AES/CBC/PKCS5Padding
password.encoder.iterations = 4096
password.encoder.key.length = 128
password.encoder.keyfactory.algorithm = null
password.encoder.old.secret = null
password.encoder.secret = null
port = 9092
principal.builder.class = null
producer.purgatory.purge.interval.requests = 1000
queued.max.request.bytes = -1
queued.max.requests = 500
quota.consumer.default = 9223372036854775807
quota.producer.default = 9223372036854775807
quota.window.num = 11
quota.window.size.seconds = 1
replica.fetch.backoff.ms = 1000
replica.fetch.max.bytes = 1048576
replica.fetch.min.bytes = 1
replica.fetch.response.max.bytes = 10485760
replica.fetch.wait.max.ms = 500
replica.high.watermark.checkpoint.interval.ms = 5000
replica.lag.time.max.ms = 10000
replica.socket.receive.buffer.bytes = 65536
replica.socket.timeout.ms = 30000
replication.quota.window.num = 11
replication.quota.window.size.seconds = 1
request.timeout.ms = 30000
reserved.broker.max.id = 1000
sasl.client.callback.handler.class = null
sasl.enabled.mechanisms = [GSSAPI]
sasl.jaas.config = null
sasl.kerberos.kinit.cmd = /usr/bin/kinit
sasl.kerberos.min.time.before.relogin = 60000
sasl.kerberos.principal.to.local.rules = [DEFAULT]
sasl.kerberos.service.name = null
sasl.kerberos.ticket.renew.jitter = 0.05
sasl.kerberos.ticket.renew.window.factor = 0.8
sasl.login.callback.handler.class = null
sasl.login.class = null
sasl.login.refresh.buffer.seconds = 300
sasl.login.refresh.min.period.seconds = 60
sasl.login.refresh.window.factor = 0.8
sasl.login.refresh.window.jitter = 0.05
sasl.mechanism.inter.broker.protocol = GSSAPI
sasl.server.callback.handler.class = null
security.inter.broker.protocol = PLAINTEXT
socket.receive.buffer.bytes = 102400
socket.request.max.bytes = 104857600
socket.send.buffer.bytes = 102400
ssl.cipher.suites = []
ssl.client.auth = none
ssl.enabled.protocols = [TLSv1.2, TLSv1.1, TLSv1]
ssl.endpoint.identification.algorithm = https
ssl.key.password = null
ssl.keymanager.algorithm = SunX509
ssl.keystore.location = null
ssl.keystore.password = null
ssl.keystore.type = JKS
ssl.protocol = TLS
ssl.provider = null
ssl.secure.random.implementation = null
ssl.trustmanager.algorithm = PKIX
ssl.truststore.location = null
ssl.truststore.password = null
ssl.truststore.type = JKS
transaction.abort.timed.out.transaction.cleanup.interval.ms = 60000
transaction.max.timeout.ms = 900000
transaction.remove.expired.transaction.cleanup.interval.ms = 3600000
transaction.state.log.load.buffer.size = 5242880
transaction.state.log.min.isr = 2
transaction.state.log.num.partitions = 50
transaction.state.log.replication.factor = 3
transaction.state.log.segment.bytes = 104857600
transactional.id.expiration.ms = 604800000
unclean.leader.election.enable = false
zookeeper.connect = localhost:32181
zookeeper.connection.timeout.ms = null
zookeeper.max.in.flight.requests = 10
zookeeper.session.timeout.ms = 6000
zookeeper.set.acl = false
zookeeper.sync.time.ms = 2000
(kafka.server.KafkaConfig)
[2019-09-03 16:12:48,350] WARN The package io.confluent.support.metrics.collectors.FullCollector for collecting the full set of support metrics could not be loaded, so we are reverting to anonymous, basic metric collection. If you are a Confluent customer, please refer to the Confluent Platform documentation, section Proactive Support, on how to activate full metrics collection. (io.confluent.support.metrics.KafkaSupportConfig)
[2019-09-03 16:12:48,374] WARN Please note that the support metrics collection feature ("Metrics") of Proactive Support is enabled. With Metrics enabled, this broker is configured to collect and report certain broker and cluster metadata ("Metadata") about your use of the Confluent Platform (including without limitation, your remote internet protocol address) to Confluent, Inc. ("Confluent") or its parent, subsidiaries, affiliates or service providers every 24hours. This Metadata may be transferred to any country in which Confluent maintains facilities. For a more in depth discussion of how Confluent processes such information, please read our Privacy Policy located at http://www.confluent.io/privacy. By proceeding with `confluent.support.metrics.enable=true`, you agree to all such collection, transfer, storage and use of Metadata by Confluent. You can turn the Metrics feature off by setting `confluent.support.metrics.enable=false` in the broker configuration and restarting the broker. See the Confluent Platform documentation for further information. (io.confluent.support.metrics.SupportedServerStartable)
[2019-09-03 16:12:48,377] INFO starting (kafka.server.KafkaServer)
[2019-09-03 16:12:48,378] INFO Connecting to zookeeper on localhost:32181 (kafka.server.KafkaServer)
[2019-09-03 16:12:48,396] INFO [ZooKeeperClient] Initializing a new session to localhost:32181. (kafka.zookeeper.ZooKeeperClient)
[2019-09-03 16:12:48,402] INFO Client environment:zookeeper.version=3.4.13-2d71af4dbe22557fda74f9a9b4309b15a7487f03, built on 06/29/2018 00:39 GMT (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,402] INFO Client environment:host.name=prl-dev (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,402] INFO Client environment:java.version=1.8.0_172 (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,402] INFO Client environment:java.vendor=Azul Systems, Inc. (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,402] INFO Client environment:java.home=/usr/lib/jvm/zulu-8-amd64/jre (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,402] INFO Client environment:java.class.path=/usr/bin/../share/java/kafka/jaxb-api-2.3.0.jar:/usr/bin/../share/java/kafka/javax.annotation-api-1.2.jar:/usr/bin/../share/java/kafka/javax.inject-1.jar:/usr/bin/../share/java/kafka/httpcore-4.4.4.jar:/usr/bin/../share/java/kafka/audience-annotations-0.5.0.jar:/usr/bin/../share/java/kafka/connect-basic-auth-extension-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/jline-0.9.94.jar:/usr/bin/../share/java/kafka/zkclient-0.11.jar:/usr/bin/../share/java/kafka/jackson-core-2.9.8.jar:/usr/bin/../share/java/kafka/commons-lang3-3.8.1.jar:/usr/bin/../share/java/kafka/jetty-http-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/commons-codec-1.9.jar:/usr/bin/../share/java/kafka/kafka_2.11-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/commons-collections-3.2.2.jar:/usr/bin/../share/java/kafka/jackson-databind-2.9.8.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-json-provider-2.9.8.jar:/usr/bin/../share/java/kafka/commons-compress-1.8.1.jar:/usr/bin/../share/java/kafka/aopalliance-repackaged-2.5.0-b42.jar:/usr/bin/../share/java/kafka/jetty-server-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/kafka_2.11-2.1.1-cp1-sources.jar:/usr/bin/../share/java/kafka/metrics-core-2.2.0.jar:/usr/bin/../share/java/kafka/netty-3.10.6.Final.jar:/usr/bin/../share/java/kafka/jetty-util-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/commons-beanutils-1.9.2.jar:/usr/bin/../share/java/kafka/commons-logging-1.2.jar:/usr/bin/../share/java/kafka/scala-logging_2.11-3.9.0.jar:/usr/bin/../share/java/kafka/jetty-io-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/reflections-0.9.11.jar:/usr/bin/../share/java/kafka/jackson-module-jaxb-annotations-2.9.8.jar:/usr/bin/../share/java/kafka/jackson-jaxrs-base-2.9.8.jar:/usr/bin/../share/java/kafka/jersey-hk2-2.27.jar:/usr/bin/../share/java/kafka/connect-json-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/rocksdbjni-5.14.2.jar:/usr/bin/../share/java/kafka/kafka-streams-test-utils-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/commons-validator-1.5.1.jar:/usr/bin/../share/java/kafka/kafka_2.11-2.1.1-cp1-scaladoc.jar:/usr/bin/../share/java/kafka/zstd-jni-1.3.7-1.jar:/usr/bin/../share/java/kafka/jopt-simple-5.0.4.jar:/usr/bin/../share/java/kafka/commons-lang3-3.1.jar:/usr/bin/../share/java/kafka/kafka-streams-examples-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/connect-api-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/commons-digester-1.8.1.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.1.jar:/usr/bin/../share/java/kafka/jetty-continuation-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/zookeeper-3.4.13.jar:/usr/bin/../share/java/kafka/snappy-java-1.1.7.2.jar:/usr/bin/../share/java/kafka/connect-transforms-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/zkclient-0.10.jar:/usr/bin/../share/java/kafka/jersey-client-2.27.jar:/usr/bin/../share/java/kafka/scala-reflect-2.11.12.jar:/usr/bin/../share/java/kafka/support-metrics-common-5.1.2.jar:/usr/bin/../share/java/kafka/guava-20.0.jar:/usr/bin/../share/java/kafka/jackson-mapper-asl-1.9.13.jar:/usr/bin/../share/java/kafka/common-utils-5.1.2.jar:/usr/bin/../share/java/kafka/javax.servlet-api-3.1.0.jar:/usr/bin/../share/java/kafka/kafka-clients-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/kafka-log4j-appender-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/osgi-resource-locator-1.0.1.jar:/usr/bin/../share/java/kafka/javax.ws.rs-api-2.1.jar:/usr/bin/../share/java/kafka/connect-file-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/kafka-streams-scala_2.11-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/javax.inject-2.5.0-b42.jar:/usr/bin/../share/java/kafka/activation-1.1.1.jar:/usr/bin/../share/java/kafka/jetty-servlet-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/plexus-utils-3.1.0.jar:/usr/bin/../share/java/kafka/avro-1.8.1.jar:/usr/bin/../share/java/kafka/hk2-locator-2.5.0-b42.jar:/usr/bin/../share/java/kafka/validation-api-1.1.0.Final.jar:/usr/bin/../share/java/kafka/jersey-media-jaxb-2.27.jar:/usr/bin/../share/java/kafka/hk2-utils-2.5.0-b42.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-core-2.27.jar:/usr/bin/../share/java/kafka/log4j-1.2.17.jar:/usr/bin/../share/java/kafka/kafka.jar:/usr/bin/../share/java/kafka/connect-runtime-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/paranamer-2.7.jar:/usr/bin/../share/java/kafka/jetty-servlets-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/lz4-java-1.5.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-2.1.1-cp1-javadoc.jar:/usr/bin/../share/java/kafka/jersey-common-2.27.jar:/usr/bin/../share/java/kafka/jetty-security-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/javassist-3.22.0-CR2.jar:/usr/bin/../share/java/kafka/slf4j-api-1.7.25.jar:/usr/bin/../share/java/kafka/hk2-api-2.5.0-b42.jar:/usr/bin/../share/java/kafka/jackson-core-asl-1.9.13.jar:/usr/bin/../share/java/kafka/jetty-client-9.4.12.v20180830.jar:/usr/bin/../share/java/kafka/maven-artifact-3.6.0.jar:/usr/bin/../share/java/kafka/kafka_2.11-2.1.1-cp1-test-sources.jar:/usr/bin/../share/java/kafka/kafka_2.11-2.1.1-cp1-test.jar:/usr/bin/../share/java/kafka/jersey-container-servlet-2.27.jar:/usr/bin/../share/java/kafka/jersey-server-2.27.jar:/usr/bin/../share/java/kafka/xz-1.5.jar:/usr/bin/../share/java/kafka/kafka-tools-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/kafka-streams-2.1.1-cp1.jar:/usr/bin/../share/java/kafka/httpmime-4.5.2.jar:/usr/bin/../share/java/kafka/slf4j-log4j12-1.7.25.jar:/usr/bin/../share/java/kafka/httpclient-4.5.2.jar:/usr/bin/../share/java/kafka/scala-library-2.11.12.jar:/usr/bin/../share/java/kafka/support-metrics-client-5.1.2.jar:/usr/bin/../share/java/kafka/argparse4j-0.7.0.jar:/usr/bin/../share/java/kafka/jackson-annotations-2.9.8.jar:/usr/bin/../share/java/confluent-support-metrics/*:/usr/share/java/confluent-support-metrics/* (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,403] INFO Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,403] INFO Client environment:java.io.tmpdir=/tmp (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,403] INFO Client environment:java.compiler=<NA> (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,403] INFO Client environment:os.name=Linux (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,403] INFO Client environment:os.arch=amd64 (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,404] INFO Client environment:os.version=4.14.134-boot2docker (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,404] INFO Client environment:user.name=root (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,404] INFO Client environment:user.home=/root (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,404] INFO Client environment:user.dir=/ (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,406] INFO Initiating client connection, connectString=localhost:32181 sessionTimeout=6000 watcher=kafka.zookeeper.ZooKeeperClient$ZooKeeperClientWatcher$@44c03695 (org.apache.zookeeper.ZooKeeper)
[2019-09-03 16:12:48,417] INFO [ZooKeeperClient] Waiting until connected. (kafka.zookeeper.ZooKeeperClient)
[2019-09-03 16:12:48,418] INFO Opening socket connection to server localhost/127.0.0.1:32181. Will not attempt to authenticate using SASL (unknown error) (org.apache.zookeeper.ClientCnxn)
[2019-09-03 16:12:48,423] INFO Socket connection established to localhost/127.0.0.1:32181, initiating session (org.apache.zookeeper.ClientCnxn)
[2019-09-03 16:12:48,430] INFO Session establishment complete on server localhost/127.0.0.1:32181, sessionid = 0x100019eba840003, negotiated timeout = 6000 (org.apache.zookeeper.ClientCnxn)
[2019-09-03 16:12:48,433] INFO [ZooKeeperClient] Connected. (kafka.zookeeper.ZooKeeperClient)
[2019-09-03 16:12:48,661] INFO Cluster ID = xlGe1w6KRyGnwapeI3BhPQ (kafka.server.KafkaServer)
[2019-09-03 16:12:48,709] INFO KafkaConfig values: (identical to the 16:12:48,296 dump above) (kafka.server.KafkaConfig)
[2019-09-03 16:12:48,720] INFO KafkaConfig values: (identical to the 16:12:48,296 dump above) (kafka.server.KafkaConfig)
[2019-09-03 16:12:48,740] INFO [ThrottledChannelReaper-Fetch]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2019-09-03 16:12:48,741] INFO [ThrottledChannelReaper-Produce]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2019-09-03 16:12:48,742] INFO [ThrottledChannelReaper-Request]: Starting (kafka.server.ClientQuotaManager$ThrottledChannelReaper)
[2019-09-03 16:12:48,775] INFO Loading logs. (kafka.log.LogManager)
[2019-09-03 16:12:48,834] ERROR Error while loading log dir /var/lib/kafka/data (kafka.log.LogManager)
java.io.IOException: Invalid argument
at sun.nio.ch.FileChannelImpl.map0(Native Method)
at sun.nio.ch.FileChannelImpl.map(FileChannelImpl.java:926)
at kafka.log.AbstractIndex.<init>(AbstractIndex.scala:126)
at kafka.log.OffsetIndex.<init>(OffsetIndex.scala:54)
at kafka.log.LogSegment$.open(LogSegment.scala:634)
at kafka.log.Log$$anonfun$kafka$log$Log$$loadSegmentFiles$3.apply(Log.scala:465)
at kafka.log.Log$$anonfun$kafka$log$Log$$loadSegmentFiles$3.apply(Log.scala:452)
at scala.collection.TraversableLike$WithFilter$$anonfun$foreach$1.apply(TraversableLike.scala:733)
at scala.collection.IndexedSeqOptimized$class.foreach(IndexedSeqOptimized.scala:33)
at scala.collection.mutable.ArrayOps$ofRef.foreach(ArrayOps.scala:186)
at scala.collection.TraversableLike$WithFilter.foreach(TraversableLike.scala:732)
at kafka.log.Log.kafka$log$Log$$loadSegmentFiles(Log.scala:452)
at kafka.log.Log$$anonfun$loadSegments$1.apply$mcV$sp(Log.scala:563)
at kafka.log.Log$$anonfun$loadSegments$1.apply(Log.scala:557)
at kafka.log.Log$$anonfun$loadSegments$1.apply(Log.scala:557)
at kafka.log.Log.retryOnOffsetOverflow(Log.scala:2007)
at kafka.log.Log.loadSegments(Log.scala:557)
at kafka.log.Log.<init>(Log.scala:290)
at kafka.log.Log$.apply(Log.scala:2140)
at kafka.log.LogManager.kafka$log$LogManager$$loadLog(LogManager.scala:265)
at kafka.log.LogManager$$anonfun$loadLogs$2$$anonfun$11$$anonfun$apply$15$$anonfun$apply$2.apply$mcV$sp(LogManager.scala:345)
at kafka.utils.CoreUtils$$anon$1.run(CoreUtils.scala:63)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
[2019-09-03 16:12:48,840] ERROR Error while deleting the clean shutdown file in dir /var/lib/kafka/data (kafka.server.LogDirFailureChannel)
java.io.IOException: Invalid argument
(27 stack frames identical to the trace above)
[2019-09-03 16:12:48,843] INFO Logs loading complete in 68 ms. (kafka.log.LogManager)
[2019-09-03 16:12:48,860] INFO Starting log cleanup with a period of 300000 ms. (kafka.log.LogManager)
[2019-09-03 16:12:48,862] INFO Starting log flusher with a default period of 9223372036854775807 ms. (kafka.log.LogManager)
[2019-09-03 16:12:48,865] INFO Starting the log cleaner (kafka.log.LogCleaner)
[2019-09-03 16:12:48,930] INFO [kafka-log-cleaner-thread-0]: Starting (kafka.log.LogCleaner)
[2019-09-03 16:12:49,154] INFO Awaiting socket connections on 0.0.0.0:39092. (kafka.network.Acceptor)
[2019-09-03 16:12:49,186] INFO [SocketServer brokerId=1] Started 1 acceptor threads (kafka.network.SocketServer)
[2019-09-03 16:12:49,199] INFO [ExpirationReaper-1-Produce]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-09-03 16:12:49,201] INFO [ExpirationReaper-1-Fetch]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-09-03 16:12:49,202] INFO [ExpirationReaper-1-DeleteRecords]: Starting (kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper)
[2019-09-03 16:12:49,213] INFO [LogDirFailureHandler]: Starting (kafka.server.ReplicaManager$LogDirFailureHandler)
[2019-09-03 16:12:49,215] INFO [ReplicaManager broker=1] Stopping serving replicas in dir /var/lib/kafka/data (kafka.server.ReplicaManager)
[2019-09-03 16:12:49,217] INFO [ReplicaFetcherManager on broker 1] Removed fetcher for partitions Set() (kafka.server.ReplicaFetcherManager)
[2019-09-03 16:12:49,218] INFO [ReplicaAlterLogDirsManager on broker 1] Removed fetcher for partitions Set() (kafka.server.ReplicaAlterLogDirsManager)
[2019-09-03 16:12:49,221] INFO [ReplicaManager broker=1] Broker 1 stopped fetcher for partitions and stopped moving logs for partitions because they are in the failed log directory /var/lib/kafka/data. (kafka.server.ReplicaManager)
[2019-09-03 16:12:49,222] INFO Stopping serving logs in dir /var/lib/kafka/data (kafka.log.LogManager)
[2019-09-03 16:12:49,224] ERROR Shutdown broker because all log dirs in /var/lib/kafka/data have failed (kafka.log.LogManager)
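What actually failed: Kafka memory-maps its index files (the kafka.log.AbstractIndex constructor in the trace calls FileChannelImpl.map), and the underlying mmap call returned EINVAL ("Invalid argument") for files under /var/lib/kafka/data. That is the classic symptom of a data directory sitting on a VM shared-folder filesystem, which typically does not support memory-mapped files; with docker-machine, a -v bind mount of a host directory is served by exactly such a shared folder inside the VM. A workaround sketch, keeping the data on a named Docker volume local to the VM instead (the volume name is illustrative):

    # store the broker's data on a VM-local volume, where mmap works,
    # instead of a Parallels shared folder
    docker volume create kafka-data
    docker run -d --net=host \
      -e KAFKA_BROKER_ID=1 \
      -e KAFKA_ZOOKEEPER_CONNECT=localhost:32181 \
      -e KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://localhost:39092 \
      -e KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR=1 \
      -v kafka-data:/var/lib/kafka/data \
      confluentinc/cp-kafka:5.1.2

Any VM-local path (for example under /mnt/sda1 on boot2docker) should work as well; the requirement is only that log.dirs point at a filesystem that supports mmap.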