Introduction
Using open-source Java libraries, we build a small search engine that crawls the content of a website. Starting from one page, it follows the links it finds to crawl deeper, collecting the addresses and titles of all related pages, and then lets the user search the collected URLs by keyword.
Features
(1) The user specifies a starting URL, and the crawler fetches the content of that page.
(2) The page content is parsed and all URL links in it are extracted.
(3) The user sets a crawl depth: starting from the initial URL's page, the crawler follows every URL it finds, then every URL on those pages, and so on. The larger the depth, the more pages can be reached.
(4) The crawled URLs are saved and indexed. The indexed content is the URL itself plus the title of the corresponding page.
(5) The user can search by keyword and get back every URL whose indexed text contains that keyword.
(6) Both index building and index searching recognize Chinese keywords and segment them into words (see the tokenization sketch after this list).
(7) The user can specify the index storage path, the initial URL, the crawl depth, the search keywords, and the maximum number of matches.
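Feature (6) is the least obvious part, so here is a minimal sketch of how IK Analyzer segments a Chinese string under Lucene 4.x. The demo class, its package name, and the sample string are made up for illustration; only the Lucene/IK calls shown are assumed to exist in these versions.
package webCrawler.Demo;

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class TokenizeDemo {
    public static void main(String[] args) throws Exception {
        IKAnalyzer analyzer = new IKAnalyzer();
        // tokenStream() segments the input the same way BuildIndex
        // segments page titles before they are indexed
        TokenStream stream = analyzer.tokenStream("text", new StringReader("浙江大学计算机学院"));
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString());  // one segmented term per line
        }
        stream.end();
        stream.close();
        analyzer.close();
    }
}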
Open-Source Frameworks
- Lucene (full-text indexing and search)
- Jsoup (HTML fetching and parsing)
- IK Analyzer (Chinese word segmentation for Lucene; imported in the code below)
Source Code
Crawler: Spider.java
package webCrawler.Spider;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Scanner;

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import webCrawler.Index.BuildIndex;

public class Spider {
    private ArrayList<String> URLs;  // all URLs collected so far
    private String startURL;         // seed URL the crawl starts from
    private int digLevel;            // maximum crawl depth

    public Spider(String startURL, int digLevel) {
        this.startURL = startURL;
        this.digLevel = digLevel;
        this.URLs = new ArrayList<>();
    }

    /**
     * Returns the deduplicated set of links found on all pages in arrayList.
     * The level parameter only guards against being called with depth zero.
     */
    public ArrayList<String> getLevelURLs(int level, ArrayList<String> arrayList)
            throws IOException {
        ArrayList<String> total = null;
        if (level > 0) {
            total = new ArrayList<>();
            for (String url : arrayList) {
                for (String each : getBareLinks(url)) {
                    total.add(each);
                }
            }
            // remove duplicates by round-tripping through a HashSet
            HashSet<String> hashSet = new HashSet<>(total);
            total = new ArrayList<>(hashSet);
        }
        return total;
    }

    /** Breadth-first crawl: expands one level at a time, down to digLevel. */
    public void getAll() throws IOException {
        ArrayList<String> newURLs;
        ArrayList<String> currentURLs = new ArrayList<>();
        currentURLs.add(startURL);
        for (int i = digLevel; i > 0; i--) {
            System.out.println("Dig into level: " + (digLevel - i + 1));
            newURLs = getLevelURLs(i, currentURLs);
            for (String each : currentURLs) {
                URLs.add(each);
            }
            currentURLs = newURLs;
        }
        for (String each : currentURLs) {
            URLs.add(each);
        }
        // deduplicate the final list
        HashSet<String> hashSet = new HashSet<>(URLs);
        URLs = new ArrayList<>(hashSet);
    }

    /** Indexes every collected URL together with its page title. */
    public void storeURLsAndInfo(String path) throws IOException {
        BuildIndex build = new BuildIndex(path);
        for (String each : URLs) {
            String text = getLinkText(each);
            if (text != null) {
                build.addField("url", each);
                build.addField("text", text);
                build.pushIndex();
            }
        }
        build.close();
    }

    /** Fetches a page and returns its title, or null on failure/timeout. */
    public String getLinkText(String url) throws IOException {
        Document document;
        try {
            document = Jsoup.connect(url).timeout(3000).get();
        } catch (Exception e) {
            System.out.println("[TIMEOUT]Get title of url:" + url);
            return null;
        }
        return document.title();
    }

    /** Extracts, filters, and normalizes all links in the page body. */
    public ArrayList<String> getBareLinks(String url) throws IOException {
        ArrayList<String> linksList = new ArrayList<>();
        Document document;
        try {
            document = Jsoup.connect(url).timeout(2000).get();
        } catch (Exception e) {
            return linksList;  // unreachable page contributes no links
        }
        Elements links = document.select("body").select("a[href]");
        for (Element link : links) {
            // resolve to an absolute URL and strip any #fragment part
            String href = link.attr("abs:href");
            int hash = href.indexOf('#');
            if (hash >= 0) {
                href = href.substring(0, hash);
            }
            // restrict the crawl to the zju.edu.cn domain
            if (href.contains("zju.edu.cn")) {
                if (href.endsWith("/")) {
                    href = href.substring(0, href.length() - 1);
                }
                linksList.add(href);
            }
        }
        // deduplicate links found on this page
        HashSet<String> hashSet = new HashSet<>(linksList);
        return new ArrayList<>(hashSet);
    }

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        System.out.println("Enter url:");
        String url = in.nextLine().trim();
        while (!url.startsWith("http://")) {
            System.out.println("http:// is needed!");
            System.out.println("Enter url:");
            url = in.nextLine().trim();
        }
        System.out.println("Enter depth to dig more urls[<=3 recommended]:");
        int depth = in.nextInt();
        in.nextLine();  // consume the newline left over by nextInt()
        Spider spider = new Spider(url, depth);
        System.out.println("Enter path you want to save[default=d:/index-spider]:");
        String path = in.nextLine().trim();
        if (path.length() == 0) {
            path = "d:/index-spider";
        }
        try {
            System.out.println("Start fetching...");
            spider.getAll();
            System.out.println("Urls got success!");
            spider.storeURLsAndInfo(path);
            System.out.println("Stored success!");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Index building: BuildIndex.java
package webCrawler.Index;

import java.io.*;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class BuildIndex {
    private File file;
    private Directory directory;
    private IndexWriter indexWriter;
    private IndexWriterConfig config;
    private Analyzer analyzer;
    private Document document;

    public BuildIndex(String path) {
        try {
            file = new File(path);
            directory = FSDirectory.open(file);
            document = new Document();
            // IK Analyzer handles Chinese word segmentation
            analyzer = new IKAnalyzer();
            config = new IndexWriterConfig(Version.LUCENE_4_10_0, analyzer);
            indexWriter = new IndexWriter(directory, config);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Adds a stored, tokenized text field to the pending document. */
    public void addField(String fieldName, String fieldText) {
        try {
            Field field = new TextField(fieldName, fieldText, Field.Store.YES);
            document.add(field);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Writes the pending document to the index and starts a fresh one. */
    public void pushIndex() {
        try {
            indexWriter.addDocument(document);
            document = new Document();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Convenience method: index one url/title pair in a single call. */
    public void addOneIndex(String url, String text) {
        this.addField("url", url);
        this.addField("text", text);
        this.pushIndex();
    }

    public void close() {
        try {
            indexWriter.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
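A minimal sketch of using BuildIndex on its own, assuming the default index path from Spider's main; the sample url and title are placeholders:
BuildIndex build = new BuildIndex("d:/index-spider");    // path is an assumption
build.addOneIndex("http://www.zju.edu.cn", "浙江大学");  // placeholder url/title pair
build.close();  // flush and close the IndexWriter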
Index search: SearchIndex.java
package webCrawler.Index;

import java.io.File;
import java.util.Scanner;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class SearchIndex {
    private IndexSearcher indexSearcher;
    private Analyzer analyzer;
    private QueryParser parser;
    private Query query;
    private TopDocs hits;
    private DirectoryReader reader;

    public SearchIndex(String path) {
        try {
            reader = DirectoryReader.open(FSDirectory.open(new File(path)));
            indexSearcher = new IndexSearcher(reader);
            // use the same analyzer as at index time, so the query
            // is segmented the same way the documents were
            analyzer = new IKAnalyzer();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /** Runs a query against fieldName; returns the hit count, or -1 on error. */
    public int search(String fieldName, String text, int matchNumber) {
        try {
            parser = new QueryParser(fieldName, analyzer);
            query = parser.parse(text);
            hits = indexSearcher.search(query, matchNumber);
            return hits.totalHits;
        } catch (Exception e) {
            e.printStackTrace();
        }
        return -1;
    }

    /** Prints the url and title of every hit, then closes the reader. */
    public void printHits() {
        try {
            System.out.println("Total hits number:" + hits.totalHits);
            for (ScoreDoc doc : hits.scoreDocs) {
                Document document = indexSearcher.doc(doc.doc);
                System.out.println(document.get("url"));
                System.out.println(document.get("text"));
            }
            // the reader is closed here, so a fresh SearchIndex is
            // created for every query (see main below)
            reader.close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        System.out.println("Enter path of the index:");
        String path = in.nextLine().trim();
        while (path.length() == 0) {
            System.out.println("Enter path of the index:");
            path = in.nextLine().trim();
        }
        System.out.println("Enter max hit number:");
        int max = in.nextInt();
        while (max < 0) {
            System.out.println("Enter max hit number:");
            max = in.nextInt();
        }
        in.nextLine();  // consume the newline left over by nextInt()
        System.out.print("Search>>> ");
        String text = in.nextLine().trim();
        // enter "q" to quit
        while (!text.equals("q")) {
            if (text.length() > 0) {
                SearchIndex search = new SearchIndex(path);
                int hits = search.search("text", text, max);
                if (hits != -1) {
                    search.printHits();
                }
            }
            System.out.print("Search>>> ");
            text = in.nextLine().trim();
        }
    }
}
UI (for simplicity this is a command-line interface; a GUI could be built on top of the same classes as needed)
package webCrawler.UI;

import java.util.Scanner;

import webCrawler.Index.SearchIndex;

public class UI {
    public static void main(String[] args) {
        Scanner in = new Scanner(System.in);
        System.out.print("Search>>> ");
        String text = in.nextLine().trim();
        // quits on "q" or on an empty line; the index path and the
        // maximum of 20 hits are hard-coded here
        while (!text.equals("q") && text.length() > 0) {
            SearchIndex search = new SearchIndex("d:/index-spider2");
            int hits = search.search("text", text, 20);
            if (hits != -1) {
                search.printHits();
            }
            System.out.print("Search>>> ");
            text = in.nextLine().trim();
        }
    }
}