Maven 依赖(pom.xml,modelVersion 4.0.0;parent: org.springframework.boot:spring-boot-starter-parent:2.0.1.RELEASE;本项目: org.example:mySpider:1.0-SNAPSHOT):
- org.springframework.boot:spring-boot-starter-web:2.4.2
- com.alibaba:druid-spring-boot-starter:1.2.4
- org.springframework.boot:spring-boot-starter-test:2.4.2 (test)
- mysql:mysql-connector-java:8.0.22
- com.alibaba:fastjson:1.2.73
- com.google.guava:guava:22.0
- org.jsoup:jsoup:1.14.2
- org.apache.commons:commons-lang3:3.9
- com.baomidou:mybatis-plus-boot-starter:3.4.1
- com.baomidou:mybatis-plus-generator:3.4.1
- org.freemarker:freemarker:2.3.28
- org.projectlombok:lombok:1.18.8
- org.apache.httpcomponents:httpclient:4.5.4
- org.springframework.boot:spring-boot-starter-data-redis
- junit:junit:4.12 (test)

yaml 文件
# Spring Boot application config.
# NOTE(review): indentation was lost in the original paste (every key at column 0),
# which is invalid for Spring Boot's property binding; nesting restored here per the
# standard spring.datasource.* / spring.datasource.dbcp2.* / spring.redis.* layout.
spring:
  datasource:
    url: jdbc:mysql://localhost:3306/spider?useUnicode=true&characterEncoding=utf8&serverTimezone=UTC
    driver-class-name: com.mysql.cj.jdbc.Driver
    username: root
    password: root
    # dbcp2 connection pool sizing
    dbcp2:
      min-idle: 5
      initial-size: 5
      max-total: 5
      max-wait-millis: 100
  # Redis used as a retry queue for failed page fetches (see notes below in this doc)
  redis:
    database: 0
    host: 127.0.0.1
    port: 6379
    password: 123456
MySQL 表结构
SET NAMES utf8mb4;
SET FOREIGN_KEY_CHECKS = 0;

-- ----------------------------
-- Table structure for goods_info
-- ----------------------------
DROP TABLE IF EXISTS `goods_info`;
CREATE TABLE `goods_info` (
  `id` int(11) NOT NULL AUTO_INCREMENT,
  `goods_id` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
  `goods_name` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,
  `goods_price` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,
  `img_url` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin NULL DEFAULT NULL,
  PRIMARY KEY (`id`) USING BTREE
) ENGINE = InnoDB AUTO_INCREMENT = 1 CHARACTER SET = utf8 COLLATE = utf8_bin ROW_FORMAT = Dynamic;

SET FOREIGN_KEY_CHECKS = 1;

代码结构

技术点
1.SpringBoot
2.SpringMVC
3.HttpClient
4.Jsoup
5.多线程(线程池)
6.redis
7.mysql(mybatis-plus)
1.启动类进行启动,开启 @PostConstruct,调用spiderHandle
2.spiderHandle
在spiderHandle中,使用线程池处理任务,线程池工厂和拒绝策略由自己确定;同时引入countDownLatch进行线程同步,使主线程等待线程池的所有任务结束,便于计时。
3.SpiderService处理爬取数据,进行解析,同时批量插入到数据库中。注意:因为是多线程爬取,如果爬取的数据需要存入集合,需要采用并发安全的List,这里使用了synchronized锁
4.GoodsInfoMapper
是使用mybatis-plus生成得到
5.Redis队列
因为在爬取的时候可能因为网络等原因,爬取的那一条数据会失败。因此,将爬取的页码放入到redis中。
// Push the failed page number onto the Redis list "page" so a background
// thread can re-crawl it later.
// Fix: `parms` was a typo — the request map in this project is named `params`.
redisTemplate.opsForList().leftPush("page", params.get("page"));
我在后台重新启动一个线程,自旋的形式将Redis的队列中的数据阻塞式取出。然后再一次爬取。
// Submit one crawl task per odd page number 1, 3, ..., 199 (100 tasks total).
// NOTE(review): assumes the target site uses odd page indices — confirm against SysConstant.BASE_URL.
for (int i = 1; i < 201; i += 2) {
    // Fix: original used a raw `Map`; declare the generic type explicitly.
    Map<String, String> params = new HashMap<>();
    params.put("keyword", "零食");
    params.put("enc", "utf-8");
    params.put("wc", "零食");
    params.put("page", i + "");
    threadPoolExecutor.execute(() -> {
        try {
            goodsInfoService.spiderData(SysConstant.BASE_URL, params);
        } finally {
            // Fix: countDown() must run even when spiderData throws; otherwise the
            // main thread awaiting the latch (see step 2 above) would block forever.
            countDownLatch.countDown();
        }
    });
}
爬虫思路:
源代码地址
源代码在本人github上:多线程爬虫



