常用Query查询 1. TermQuery // 指定词元检索 第一个参数:field 第二个参数: term 词元 Query query = new TermQuery(new Term("content","百" )); 2. TermRangeQuery // 范围检索(待测试) Query query = new TermRangeQuery("name", new BytesRef("zh"),new BytesRef("an"), true, true); 3. NumericRangeQuery // 按数字范围检索 第一个参数:field 第二个参数:start 第三个参数:end 第四个参数:是否包含start值 第五个参数: 是否包含end值 Query query = NumericRangeQuery.newIntRange("id", 2, 3, true,false); 4. PrefixQuery // 按指定前缀检索 Query query = new PrefixQuery(new Term("name","lis")); 5. WildcardQuery // 通配符检索 ? 表示占1个字符 * 匹配0-n个字符 Query query = new WildcardQuery(new Term("content","育*")); 6. FuzzyQuery // 模糊检索 搜索的关键字即使有错,在一定范围内也可以查到 Query query = new FuzzyQuery(new Term("content","baiahi")); //baizhi 7. BooleanQuery // 布尔检索 BooleanQuery query = new BooleanQuery(); TermQuery query1 = new TermQuery(new Term("content","百")); TermQuery query2 = new TermQuery(new Term("name","zhangsan")); // Occur.xxx 可选值:MUST--->必须存在 MUST_NOT--->必须不存在 SHOULD--->可有可无 query.add(query1,Occur.MUST_NOT); //第二个参数:查询的结果中 不要包含 "百" 文档 query.add(query2,Occur.MUST); 8. PhraseQuery // 短语检索 一般适用于英文短句检索 PhraseQuery query = new PhraseQuery(); // slop是指两个项的位置之间允许的最大间隔距离 query.setSlop(1); query.add(new Term("content","i")); query.add(new Term("content","baizhi")); // 检索 i [**] baizhi 的所有文档
基于QueryParser的高级搜索(可以替换Query查询)
@Test
public void indexSearchByQueryParser(){
try {
FSDirectory directory = FSDirectory
.open(Paths
.get(
"F:\\lucene\\index\\example03"))
IndexReader indexReader = DirectoryReader
.open(directory)
DirectoryReader directoryReader = DirectoryReader
.openIfChanged((DirectoryReader) indexReader)
if(directoryReader != null) indexReader = directoryReader
// 创建检索器
IndexSearcher searcher = new IndexSearcher(indexReader)
// 创建查询解析器 第一个参数:指定默认检索域 第二个参数:分词器
QueryParser queryParser = new QueryParser(
"content", new StandardAnalyzer())
Query query = null
// 使用 parse(表达式) 可以完全替换Query查询
// 分词检索
query = queryParser
.parse(
"百知")
// 范围检索 不支持数值范围检索
// test ---> 表示检索域 [a TO e] ---> test域中a到e的文档(包括a和e)
query = queryParser
.parse(
"test:[a TO e]")
// test ---> 表示检索域 {a TO e} ---> test域中a到e的文档(不包括a和
z)
query = queryParser
.parse(
"age:{10 TO 20}")
// 通配符 检索
query = queryParser
.parse(
"百*")
query = queryParser
.parse(
"lo?e")
query = queryParser
.parse(
"baiz?i")
// 模糊检索 ~ 表示模糊检索
query = queryParser
.parse(
"baizha~")
query = queryParser
.parse(
"lovi~")
// 多条件检索
//检索name中有zhangsan 或者 content中有lucene的文档
query = queryParser
.parse(
"name:zhangsan OR lucene")
//检索name中有zhangsan 并且 content中有lucene的文档
query = queryParser
.parse(
"name:zhangsan AND baizhi")
//检索name中有zhangsan 并且 content中没有lucene的文档
query = queryParser
.parse(
"name:zhangsan NOT lucene")
// 短语检索
query = queryParser
.parse(
"content:\"love lucene\"")
// 临近检索 :词组中的多个词之间的距离之和不超过
1,则满足查询。
query = queryParser
.parse(
"content:\"i lucene\"~1")
// 加权检索
query = queryParser
.parse(
"百知^0.1")
// 布尔检索
query = queryParser
.parse(
"+love -name:zhangsan")
query = queryParser
.parse(
"love -baizhi")
TopDocs topDocs = searcher
.search(query,
10)
System
.out.println(
"查询结果:"+topDocs
.totalHits+
"条")
System
.out.println(
"最大文档数:"+indexReader
.maxDoc())
System
.out.println(
"最大索引文档数:"+indexReader
.numDocs())
for(ScoreDoc sd : topDocs
.scoreDocs){
System
.out.print(searcher
.doc(sd
.doc)
.get(
"id")+
" | ")
System
.out.print(searcher
.doc(sd
.doc)
.get(
"name")+
" | ")
System
.out.print(searcher
.doc(sd
.doc)
.get(
"content")+
" | ")
System
.out.print(DateTools
.stringToDate(searcher
.doc(sd
.doc)
.get(
"date")))
System
.out.println()
}
indexReader
.close()
directory
.close()
} catch (Exception e) {
// TODO Auto-generated catch block
e
.printStackTrace()
}
}
@Test
public void indexCreate(){
try {
FSDirectory directory = FSDirectory
.open(Paths
.get(
"F:\\lucene\\index\\example03"))
StandardAnalyzer analyzer = new StandardAnalyzer()
IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig(analyzer))
Document doc1 = new Document()
doc1
.add(new IntField(
"id",
1,Field
.Store.YES))
doc1
.add(new StringField(
"name",
"zhangsan", Field
.Store.YES))
doc1
.add(new StringField(
"age",
"15", Field
.Store.YES))
doc1
.add(new StringField(
"test",
"a", Field
.Store.YES))
doc1
.add(new TextField(
"content",
"百知教育 I love baizhi", Field
.Store.YES))
doc1
.add(new Field(
"date", DateTools
.dateToString(new Date(), Resolution
.SECOND), StringField
.TYPE_STORED))
Document doc2 = new Document()
doc2
.add(new IntField(
"id",
2,Field
.Store.YES))
doc2
.add(new StringField(
"test",
"e", Field
.Store.YES))
doc2
.add(new StringField(
"age",
"18", Field
.Store.YES))
doc2
.add(new StringField(
"name",
"lisi", Field
.Store.YES))
doc2
.add(new TextField(
"content",
"lucene实战开发 I love Lucene", Field
.Store.YES))
doc2
.add(new Field(
"date", DateTools
.dateToString(new Date(), Resolution
.SECOND), StringField
.TYPE_STORED))
indexWriter
.addDocument(doc2)
indexWriter
.close()
directory
.close()
} catch (Exception e) {
// TODO Auto-generated catch block
e
.printStackTrace()
}
}
分页检索
/**
 * Seeds the example04 index with 100 test documents.
 *
 * Each document carries a stored numeric id (1..100), a stored name of the
 * form 张三&lt;i&gt;, and an analyzed (unstored) content field read from the
 * text "java".
 *
 * @author gaozhy
 * @2017年4月14日 上午9:35:09
 */
@Test
public void addTestData() {
    try {
        FSDirectory directory = FSDirectory.open(Paths.get("F:\\lucene\\index\\example04"));
        IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new StandardAnalyzer()));
        for (int id = 1; id <= 100; id++) {
            Document document = new Document();
            document.add(new IntField("id", id, Store.YES));
            document.add(new StringField("name", "张三" + id, Store.YES));
            document.add(new TextField("content", new StringReader("java")));
            writer.addDocument(document);
        }
        writer.close();
        directory.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/**
 * Paged search over the example04 index using IndexSearcher.searchAfter.
 *
 * Page N (1-based) is fetched by first collecting the (N-1)*pageSize
 * preceding hits, anchoring on the last of them, and asking searchAfter
 * for the next pageSize hits.
 *
 * @author gaozhy
 * @2017年4月14日 上午9:27:01
 */
@Test
public void indexSearchByPage() {
    try {
        FSDirectory directory = FSDirectory.open(Paths.get("F:\\lucene\\index\\example04"));
        IndexReader indexReader = DirectoryReader.open(directory);
        // Refresh the reader if the index changed; close the stale one.
        DirectoryReader directoryReader = DirectoryReader.openIfChanged((DirectoryReader) indexReader);
        if (directoryReader != null) {
            indexReader.close();
            indexReader = directoryReader;
        }
        IndexSearcher searcher = new IndexSearcher(indexReader);
        QueryParser parser = new QueryParser("content", new StandardAnalyzer());
        Query query = parser.parse("name:张三*");
        int nowPage = 8;   // 1-based page number to display
        int pageSize = 10; // hits per page
        int start = (nowPage - 1) * pageSize; // number of hits to skip
        ScoreDoc scoreDoc = null;
        // BUG FIX: the original unconditionally ran searcher.search(query, start),
        // which throws IllegalArgumentException when start == 0 (page 1), and
        // indexed scoreDocs[start-1] without checking the array length.
        if (start > 0) {
            TopDocs preceding = searcher.search(query, start);
            if (preceding.scoreDocs.length >= start) {
                scoreDoc = preceding.scoreDocs[start - 1];
            }
        }
        TopDocs topDocs = searcher.searchAfter(scoreDoc, query, pageSize);
        System.out.println("总条数:" + topDocs.totalHits);
        for (ScoreDoc sd : topDocs.scoreDocs) {
            Document doc = searcher.doc(sd.doc);
            System.out.print(doc.get("id") + " | ");
            System.out.print(doc.get("name") + " | ");
            System.out.print(doc.get("content"));
            System.out.println();
        }
        indexReader.close();
        directory.close();
    } catch (Exception e) {
        e.printStackTrace();
    }
}