Просмотр исходного кода

注释kafka相关配置信息以及xxl配置信息

chenhongyan1989 11 месяцев назад
Родитель
Commit
327d497319

+ 74 - 74
energy-manage-service/src/main/java/com/energy/manage/service/config/kafka/KafkaConsumerConfig.java

@@ -17,82 +17,82 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-@Configuration
+//@Configuration
 @Slf4j
 public class KafkaConsumerConfig {
 
-    @Value("${spring.kafka.consumer.bootstrapServers}")
-    private String bootstrapServers;
-//    @Value("${spring.kafka.consumer.topics}")
-//    private List<String> topics;
-    @Value("${spring.kafka.consumer.groupId}")
-    private String groupId;
-    @Value("${spring.kafka.consumer.sessionTimeOut}")
-    private String sessionTimeOut;
-    @Value("${spring.kafka.consumer.enableAutoCommit}")
-    private String enableAutoCommit;
-    @Value("${spring.kafka.consumer.autoCommitInterval}")
-    private String autoCommitInterval;
-    @Value("${spring.kafka.consumer.maxPollRecords}")
-    private String maxPollRecords;
-    @Value("${spring.kafka.consumer.maxPollInterval}")
-    private String maxPollInterval;
-    @Value("${spring.kafka.consumer.heartbeatInterval}")
-    private String heartbeatInterval;
-    @Value("${spring.kafka.consumer.keyDeserializer}")
-    private String keyDeserializer;
-    @Value("${spring.kafka.consumer.valueDeserializer}")
-    private String valueDeserializer;
-    @Value("${spring.kafka.consumer.autoOffsetReset}")
-    private String autoOffsetReset;
-
-    @Bean
-    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
-        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
-        factory.setConsumerFactory(consumerFactory());
-        // 并发数 多个微服务实例会均分
-        factory.setConcurrency(1);
-        factory.setBatchListener(true);
-        ContainerProperties containerProperties = factory.getContainerProperties();
-        // 是否设置手动提交
-        containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
-        return factory;
-    }
-
-    private ConsumerFactory<String, String> consumerFactory() {
-        Map<String, Object> consumerConfigs = consumerConfigs();
-        log.info("消费者的配置信息:{}", JSONObject.toJSONString(consumerConfigs));
-        return new DefaultKafkaConsumerFactory<>(consumerConfigs);
-    }
-
-
-    @Bean
-    public Map<String, Object> consumerConfigs() {
-        Map<String, Object> propsMap = new HashMap<>();
-        // 服务器地址
-        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
-        // 是否自动提交
-        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
-        // 自动提交间隔
-        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
-        //会话时间
-        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeOut);
-        //key序列化
-        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
-        //value序列化
-        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
-        // 心跳时间
-        propsMap.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatInterval);
-
-        // 分组id
-//        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
-        //消费策略
-        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
-        // poll记录数
-        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
-        //poll时间
-        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
-        return propsMap;
-    }
+//    @Value("${spring.kafka.consumer.bootstrapServers}")
+//    private String bootstrapServers;
+////    @Value("${spring.kafka.consumer.topics}")
+////    private List<String> topics;
+//    @Value("${spring.kafka.consumer.groupId}")
+//    private String groupId;
+//    @Value("${spring.kafka.consumer.sessionTimeOut}")
+//    private String sessionTimeOut;
+//    @Value("${spring.kafka.consumer.enableAutoCommit}")
+//    private String enableAutoCommit;
+//    @Value("${spring.kafka.consumer.autoCommitInterval}")
+//    private String autoCommitInterval;
+//    @Value("${spring.kafka.consumer.maxPollRecords}")
+//    private String maxPollRecords;
+//    @Value("${spring.kafka.consumer.maxPollInterval}")
+//    private String maxPollInterval;
+//    @Value("${spring.kafka.consumer.heartbeatInterval}")
+//    private String heartbeatInterval;
+//    @Value("${spring.kafka.consumer.keyDeserializer}")
+//    private String keyDeserializer;
+//    @Value("${spring.kafka.consumer.valueDeserializer}")
+//    private String valueDeserializer;
+//    @Value("${spring.kafka.consumer.autoOffsetReset}")
+//    private String autoOffsetReset;
+//
+//    @Bean
+//    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> kafkaListenerContainerFactory() {
+//        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
+//        factory.setConsumerFactory(consumerFactory());
+//        // 并发数 多个微服务实例会均分
+//        factory.setConcurrency(1);
+//        factory.setBatchListener(true);
+//        ContainerProperties containerProperties = factory.getContainerProperties();
+//        // 是否设置手动提交
+//        containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
+//        return factory;
+//    }
+//
+//    private ConsumerFactory<String, String> consumerFactory() {
+//        Map<String, Object> consumerConfigs = consumerConfigs();
+//        log.info("消费者的配置信息:{}", JSONObject.toJSONString(consumerConfigs));
+//        return new DefaultKafkaConsumerFactory<>(consumerConfigs);
+//    }
+//
+//
+//    @Bean
+//    public Map<String, Object> consumerConfigs() {
+//        Map<String, Object> propsMap = new HashMap<>();
+//        // 服务器地址
+//        propsMap.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
+//        // 是否自动提交
+//        propsMap.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
+//        // 自动提交间隔
+//        propsMap.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, autoCommitInterval);
+//        //会话时间
+//        propsMap.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeOut);
+//        //key序列化
+//        propsMap.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer);
+//        //value序列化
+//        propsMap.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer);
+//        // 心跳时间
+//        propsMap.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatInterval);
+//
+//        // 分组id
+////        propsMap.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
+//        //消费策略
+//        propsMap.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
+//        // poll记录数
+//        propsMap.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
+//        //poll时间
+//        propsMap.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, maxPollInterval);
+//        return propsMap;
+//    }
 
 }

+ 39 - 39
energy-manage-service/src/main/java/com/energy/manage/service/controller/system/SysTestController.java

@@ -20,47 +20,47 @@ import java.util.List;
  * @author sy
  * @date 2021-10-18 13:32:05
  */
-@Slf4j
-@RestController
-@RequestMapping("SysTestController")
-@CrossOrigin
-@Api(value = "SysTestController", tags = "测试Controller")
+//@Slf4j
+//@RestController
+//@RequestMapping("SysTestController")
+//@CrossOrigin
+//@Api(value = "SysTestController", tags = "测试Controller")
 public class SysTestController {
 
-	@Autowired
-	private KafkaSendService kafkaSendService;
-
-	@GetMapping("/topic-abc-test")
-	public void abc() {
-		// 发送消息
-		kafkaSendService.sendMessage("abc", "abc kafka");
-	}
-
-
-	@GetMapping("/topic-test-test")
-	public void test() {
-		// 发送消息
-		kafkaSendService.sendMessage("test", "test kafka");
-	}
-
-
-
-	@GetMapping("/test-execl")
-	public void testExecl() {
-
-		File file = new File("/Users/shiyue/Downloads/zn/F001_min.xlsx");
-		List<MinTestDataVo> list = EasyExcel.read(file).head(MinTestDataVo.class).sheet().doReadSync();
-		int i = 0;
-//		list.stream().forEach(x -> {
-//			log.info(x.getFengsu());
-//		});
-
-		for(MinTestDataVo minTestDataVo : list){
-			log.info(minTestDataVo.getFengsu());
-			i++;
-		}
-		System.out.println(i);
-	}
+//	@Autowired
+//	private KafkaSendService kafkaSendService;
+//
+//	@GetMapping("/topic-abc-test")
+//	public void abc() {
+//		// 发送消息
+//		kafkaSendService.sendMessage("abc", "abc kafka");
+//	}
+//
+//
+//	@GetMapping("/topic-test-test")
+//	public void test() {
+//		// 发送消息
+//		kafkaSendService.sendMessage("test", "test kafka");
+//	}
+
+
+
+//	@GetMapping("/test-execl")
+//	public void testExecl() {
+//
+//		File file = new File("/Users/shiyue/Downloads/zn/F001_min.xlsx");
+//		List<MinTestDataVo> list = EasyExcel.read(file).head(MinTestDataVo.class).sheet().doReadSync();
+//		int i = 0;
+////		list.stream().forEach(x -> {
+////			log.info(x.getFengsu());
+////		});
+//
+//		for(MinTestDataVo minTestDataVo : list){
+//			log.info(minTestDataVo.getFengsu());
+//			i++;
+//		}
+//		System.out.println(i);
+//	}
 
 
 

+ 27 - 27
energy-manage-service/src/main/java/com/energy/manage/service/service/kafka/customer/KafkaListenerService.java

@@ -9,35 +9,35 @@ import org.springframework.stereotype.Component;
 import java.util.List;
 
 @Slf4j
-@Component
+//@Component
 public class KafkaListenerService {
 
-    // 接收消息
-    @KafkaListener(topics = "test",groupId = "${spring.kafka.consumer.groupId}",
-            containerFactory = "kafkaListenerContainerFactory",
-            concurrency = "1",
-            properties = {"${spring.kafka.consumer.autoOffsetReset}"})
-    public void processMessage1(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
-        for (ConsumerRecord<String, String> record : records) {
-            log.info("topic_test 消费了: Topic:" + record.topic() + ",Message:" + record.value());
-            //手动提交偏移量
-            ack.acknowledge();
-        }
-    }
-
-
-    // 接收消息
-    @KafkaListener(topics = "abc",groupId = "${spring.kafka.consumer.groupId}",
-            containerFactory = "kafkaListenerContainerFactory",
-            concurrency = "1",
-            properties = {"${spring.kafka.consumer.autoOffsetReset}"})
-    public void processMessage2(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
-        for (ConsumerRecord<String, String> record : records) {
-            log.info("topic_test 消费了: Topic:" + record.topic() + ",Message:" + record.value());
-            //手动提交偏移量
-            ack.acknowledge();
-        }
-    }
+//    // 接收消息
+//    @KafkaListener(topics = "test",groupId = "${spring.kafka.consumer.groupId}",
+//            containerFactory = "kafkaListenerContainerFactory",
+//            concurrency = "1",
+//            properties = {"${spring.kafka.consumer.autoOffsetReset}"})
+//    public void processMessage1(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
+//        for (ConsumerRecord<String, String> record : records) {
+//            log.info("topic_test 消费了: Topic:" + record.topic() + ",Message:" + record.value());
+//            //手动提交偏移量
+//            ack.acknowledge();
+//        }
+//    }
+//
+//
+//    // 接收消息
+//    @KafkaListener(topics = "abc",groupId = "${spring.kafka.consumer.groupId}",
+//            containerFactory = "kafkaListenerContainerFactory",
+//            concurrency = "1",
+//            properties = {"${spring.kafka.consumer.autoOffsetReset}"})
+//    public void processMessage2(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
+//        for (ConsumerRecord<String, String> record : records) {
+//            log.info("topic_test 消费了: Topic:" + record.topic() + ",Message:" + record.value());
+//            //手动提交偏移量
+//            ack.acknowledge();
+//        }
+//    }
 
 
 

+ 50 - 50
energy-manage-service/src/main/java/com/energy/manage/service/service/kafka/producer/KafkaSendService.java

@@ -7,56 +7,56 @@ import java.util.concurrent.ExecutionException;
 
 public interface KafkaSendService {
 
-        /**
-         * 发送同步消息
-         *
-         * @param topic
-         * @param data
-         * @throws ExecutionException
-         * @throws InterruptedException
-         */
-        void sendSyncMessage(String topic, String data) throws ExecutionException, InterruptedException;
-
-        /**
-         * 发送普通消息
-         *
-         * @param topic
-         * @param data
-         */
-        void sendMessage(String topic, String data);
-
-
-        /**
-         * 发送带附加信息的消息
-         *
-         * @param record
-         */
-        void sendMessage(ProducerRecord<String, String> record);
-
-
-        /**
-         * 发送Message消息
-         *
-         * @param message
-         */
-        void sendMessage(Message<String> message);
-
-
-        /**
-         * 发送带key的消息
-         *
-         * @param topic
-         * @param key
-         * @param data
-         */
-        void sendMessage(String topic, String key, String data);
-
-        void sendMessage(String topic, Integer partition, String key, String data);
-
-
-        void sendMessage(String topic, Integer partition, Long timestamp, String key, String data);
-
-
+//        /**
+//         * 发送同步消息
+//         *
+//         * @param topic
+//         * @param data
+//         * @throws ExecutionException
+//         * @throws InterruptedException
+//         */
+//        void sendSyncMessage(String topic, String data) throws ExecutionException, InterruptedException;
+//
+//        /**
+//         * 发送普通消息
+//         *
+//         * @param topic
+//         * @param data
+//         */
+//        void sendMessage(String topic, String data);
+//
+//
+//        /**
+//         * 发送带附加信息的消息
+//         *
+//         * @param record
+//         */
+//        void sendMessage(ProducerRecord<String, String> record);
+//
+//
+//        /**
+//         * 发送Message消息
+//         *
+//         * @param message
+//         */
+//        void sendMessage(Message<String> message);
+//
+//
+//        /**
+//         * 发送带key的消息
+//         *
+//         * @param topic
+//         * @param key
+//         * @param data
+//         */
+//        void sendMessage(String topic, String key, String data);
+//
+//        void sendMessage(String topic, Integer partition, String key, String data);
+//
+//
+//        void sendMessage(String topic, Integer partition, Long timestamp, String key, String data);
+//
+//
 
 
 }

+ 97 - 97
energy-manage-service/src/main/java/com/energy/manage/service/service/kafka/producer/impl/SendServiceImpl.java

@@ -15,103 +15,103 @@ import org.springframework.util.concurrent.ListenableFutureCallback;
 import java.util.concurrent.ExecutionException;
 
 @Slf4j
-@Service
+//@Service
 public class SendServiceImpl implements KafkaSendService {
 
-    @Autowired
-    private KafkaTemplate<String, String> kafkaTemplate;
-
-    @Override
-    public void sendSyncMessage(String topic, String data) throws ExecutionException, InterruptedException {
-        SendResult<String, String> sendResult = kafkaTemplate.send(topic, data).get();
-        RecordMetadata recordMetadata = sendResult.getRecordMetadata();
-        log.debug("sendSyncMessage 发送同步消息成功!发送的主题为:{}", recordMetadata.topic());
-    }
-
-    @Override
-    public void sendMessage(String topic, String data) {
-        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, data);
-        future.addCallback(
-                success -> log.info("sendMessage topic={}发送消息成功!",topic),
-                failure -> log.error("sendMessage 发送消息失败!失败原因是:{}", failure.getMessage())
-        );
-    }
-
-    @Override
-    public void sendMessage(ProducerRecord<String, String> record) {
-        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(record);
-        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
-            @Override
-            public void onFailure(Throwable throwable) {
-                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
-            }
-
-            @Override
-            public void onSuccess(SendResult<String, String> sendResult) {
-                RecordMetadata metadata = sendResult.getRecordMetadata();
-                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
-            }
-        });
-    }
-
-    @Override
-    public void sendMessage(Message<String> message) {
-        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(message);
-        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
-            @Override
-            public void onFailure(Throwable throwable) {
-                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
-            }
-
-            @Override
-            public void onSuccess(SendResult<String, String> sendResult) {
-                RecordMetadata metadata = sendResult.getRecordMetadata();
-                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
-            }
-        });
-    }
-
-    @Override
-    public void sendMessage(String topic, String key, String data) {
-        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, key, data);
-        log.info("发送到:{} ,消息体为:{}",topic,data);
-        future.addCallback(
-                success -> log.debug("发送消息成功!"),
-                failure -> log.error("发送消息失败!失败原因是:{}", failure.getMessage())
-        );
-    }
-
-    @Override
-    public void sendMessage(String topic, Integer partition, String key, String data) {
-        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, partition, key, data);
-        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
-            @Override
-            public void onFailure(Throwable throwable) {
-                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
-            }
-
-            @Override
-            public void onSuccess(SendResult<String, String> sendResult) {
-                RecordMetadata metadata = sendResult.getRecordMetadata();
-                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
-            }
-        });
-    }
-
-    @Override
-    public void sendMessage(String topic, Integer partition, Long timestamp, String key, String data) {
-        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, partition, timestamp, key, data);
-        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
-            @Override
-            public void onFailure(Throwable throwable) {
-                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
-            }
-
-            @Override
-            public void onSuccess(SendResult<String, String> sendResult) {
-                RecordMetadata metadata = sendResult.getRecordMetadata();
-                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
-            }
-        });
-    }
+//    @Autowired
+//    private KafkaTemplate<String, String> kafkaTemplate;
+//
+//    @Override
+//    public void sendSyncMessage(String topic, String data) throws ExecutionException, InterruptedException {
+//        SendResult<String, String> sendResult = kafkaTemplate.send(topic, data).get();
+//        RecordMetadata recordMetadata = sendResult.getRecordMetadata();
+//        log.debug("sendSyncMessage 发送同步消息成功!发送的主题为:{}", recordMetadata.topic());
+//    }
+//
+//    @Override
+//    public void sendMessage(String topic, String data) {
+//        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, data);
+//        future.addCallback(
+//                success -> log.info("sendMessage topic={}发送消息成功!",topic),
+//                failure -> log.error("sendMessage 发送消息失败!失败原因是:{}", failure.getMessage())
+//        );
+//    }
+//
+//    @Override
+//    public void sendMessage(ProducerRecord<String, String> record) {
+//        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(record);
+//        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
+//            @Override
+//            public void onFailure(Throwable throwable) {
+//                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
+//            }
+//
+//            @Override
+//            public void onSuccess(SendResult<String, String> sendResult) {
+//                RecordMetadata metadata = sendResult.getRecordMetadata();
+//                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
+//            }
+//        });
+//    }
+//
+//    @Override
+//    public void sendMessage(Message<String> message) {
+//        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(message);
+//        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
+//            @Override
+//            public void onFailure(Throwable throwable) {
+//                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
+//            }
+//
+//            @Override
+//            public void onSuccess(SendResult<String, String> sendResult) {
+//                RecordMetadata metadata = sendResult.getRecordMetadata();
+//                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
+//            }
+//        });
+//    }
+//
+//    @Override
+//    public void sendMessage(String topic, String key, String data) {
+//        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, key, data);
+//        log.info("发送到:{} ,消息体为:{}",topic,data);
+//        future.addCallback(
+//                success -> log.debug("发送消息成功!"),
+//                failure -> log.error("发送消息失败!失败原因是:{}", failure.getMessage())
+//        );
+//    }
+//
+//    @Override
+//    public void sendMessage(String topic, Integer partition, String key, String data) {
+//        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, partition, key, data);
+//        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
+//            @Override
+//            public void onFailure(Throwable throwable) {
+//                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
+//            }
+//
+//            @Override
+//            public void onSuccess(SendResult<String, String> sendResult) {
+//                RecordMetadata metadata = sendResult.getRecordMetadata();
+//                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
+//            }
+//        });
+//    }
+//
+//    @Override
+//    public void sendMessage(String topic, Integer partition, Long timestamp, String key, String data) {
+//        ListenableFuture<SendResult<String, String>> future = kafkaTemplate.send(topic, partition, timestamp, key, data);
+//        future.addCallback(new ListenableFutureCallback<SendResult<String, String>>() {
+//            @Override
+//            public void onFailure(Throwable throwable) {
+//                log.error("发送消息失败!失败原因是:{}", throwable.getMessage());
+//            }
+//
+//            @Override
+//            public void onSuccess(SendResult<String, String> sendResult) {
+//                RecordMetadata metadata = sendResult.getRecordMetadata();
+//                log.debug("发送消息成功!消息主题是:{},消息分区是:{}", metadata.topic(), metadata.partition());
+//            }
+//        });
+//    }
 }

+ 8 - 8
energy-manage-service/src/main/resources/bootstrap.properties

@@ -56,14 +56,14 @@ template.draught = http://192.168.50.234:6900/bucket-zhzn/template/%E9%A3%8E%E6%
 
 
 #xxl-job
-xxl.job.admin.addresses = http://192.168.50.234:16800/xxl-job-admin
-xxl.job.admin.accessToken = default_token
-xxl.job.executor.appname = xxl-job-executor
-xxl.job.executor.address =
-xxl.job.executor.ip =
-xxl.job.executor.port = 9999
-xxl.job.executor.logpath = /usr/local/logs/xxl
-xxl.job.executor.logretentiondays = 30
+#xxl.job.admin.addresses = http://192.168.50.234:16800/xxl-job-admin
+#xxl.job.admin.accessToken = default_token
+#xxl.job.executor.appname = xxl-job-executor
+#xxl.job.executor.address =
+#xxl.job.executor.ip =
+#xxl.job.executor.port = 9999
+#xxl.job.executor.logpath = /usr/local/logs/xxl
+#xxl.job.executor.logretentiondays = 30
 
 
 #分析算法入参json格式