| kerberos管理节点 | nyyjh1 |
| Kafka节点 | nyyjh2 nyyjh3 nyyjh4 |
| Zookeeper节点 | nyyjh1 nyyjh2 nyyjh3 |
[root@nyyjh1 ~]# kadmin.local
Authenticating as principal root/admin@EXAMPLE.COM with password.
kadmin.local: addprinc -randkey kafka/nyyjh2@EXAMPLE.COM
kadmin.local: addprinc -randkey kafka/nyyjh3@EXAMPLE.COM
kadmin.local: addprinc -randkey kafka/nyyjh4@EXAMPLE.COM
kadmin.local: addprinc -randkey zookeeper/nyyjh2@EXAMPLE.COM
kadmin.local: addprinc -randkey zookeeper/nyyjh3@EXAMPLE.COM
kadmin.local: addprinc -randkey zookeeper/nyyjh4@EXAMPLE.COM
kadmin.local: xst -k kafka.keytab kafka/nyyjh2@EXAMPLE.COM
kadmin.local: xst -k kafka.keytab kafka/nyyjh3@EXAMPLE.COM
kadmin.local: xst -k kafka.keytab kafka/nyyjh4@EXAMPLE.COM
exit
[root@nyyjh1 ~]# scp kafka.keytab root@nyyjh2:/etc/kafka1/conf/
[root@nyyjh1 ~]# scp kafka.keytab root@nyyjh3:/etc/kafka1/conf/
[root@nyyjh1 ~]# scp kafka.keytab root@nyyjh4:/etc/kafka1/conf/
2.配置Kafka各个节点的server.properties文件
listeners=SASL_PLAINTEXT://nyyjh2:9092
advertised.listeners=SASL_PLAINTEXT://nyyjh2:9092
sasl.enabled.mechanisms=GSSAPI
security.inter.broker.protocol=SASL_PLAINTEXT
sasl.kerberos.service.name=kafka
authorizer.class.name=kafka.security.auth.SimpleAclAuthorizer
super.users=User:kafka
3.配置Kafka各个节点的consumer.properties文件
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
key.serializer=org.apache.kafka.common.serialization.StringSerializer
value.serializer=org.apache.kafka.common.serialization.StringSerializer
security.protocol=SASL_PLAINTEXT
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
4.配置Kafka各个节点的producer.properties文件
key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
key.serializer=org.apache.kafka.common.serialization.StringSerializer
value.serializer=org.apache.kafka.common.serialization.StringSerializer
security.protocol=SASL_PLAINTEXT
sasl.mechanism=GSSAPI
sasl.kerberos.service.name=kafka
5.配置Kafka各个节点的bin/kafka-run-class.sh
增加KAFKA_KERBEROS_OPTS配置项,其中配置kerberos配置文件和kafka认证文件。并将该参数添加到启动时候的$JAVA后边。
# JVM performance options
if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then
  KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true -Djava.security.krb5.conf=/etc/kafka1/conf/krb5.conf -Djava.security.auth.login.config=/etc/kafka1/conf/jaas.conf"
fi
6.配置Kafka各个节点jaas.conf文件
KafkaServer {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka1/conf/kafka.keytab"
principal="kafka/节点名称@EXAMPLE.COM";
};
// ZooKeeper client authentication
KafkaClient {
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/etc/kafka1/conf/kafka.keytab"
principal="kafka/节点名称@EXAMPLE.COM";
};
7.配置Kafka各个节点krb5.conf文件
[logging]
default = FILE:/var/log/krb5libs.log
kdc = FILE:/var/log/krb5kdc.log
admin_server = FILE:/var/log/kadmind.log
[libdefaults]
dns_lookup_realm = false
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
rdns = false
pkinit_anchors = FILE:/etc/pki/tls/certs/ca-bundle.crt
default_realm = EXAMPLE.COM
default_ccache_name = KEYRING:persistent:%{uid}
[realms]
EXAMPLE.COM = {
kdc = nyyjh1
admin_server = nyyjh1
}
[domain_realm]
# .example.com = EXAMPLE.COM
# example.com = EXAMPLE.COM
8.启动Kafka测试
/usr/local/kafka/bin/kafka-server-start.sh -daemon /usr/local/kafka/config/server.properties
9.创建topic测试
kafka-topics.sh --create --partitions 1 --replication-factor 3 --zookeeper nyyjh1:2181,nyyjh2:2181,nyyjh3:2181 --topic testKafka
10.终端测试
#启动生产端
kafka-console-producer.sh --broker-list nyyjh2:9092,nyyjh3:9092,nyyjh4:9092 --topic testKafka --producer.config /usr/local/kafka/config/producer.properties
#启动消费端
kafka-console-consumer.sh --bootstrap-server nyyjh2:9092,nyyjh3:9092,192.168.122.104:9092 --consumer.config /usr/local/kafka/config/consumer.properties --topic testKafka --from-beginning
完成!



