Configuring Lucene in the Spring Framework

I will not go into Lucene's features and strengths here, since there is plenty written about them online. I will just briefly describe my own understanding.

 

With a normal SQL query such as name LIKE '%继中%', you surely know it will not use an index, so against a table with many rows the response time gets very slow, because the database scans row by row. Naturally, then, we ask: how can we make it use an index?

 

One of the solutions: Lucene.

 

Essentially, it splits your text into a set of keywords, so a keyword query can go straight to the term index, find the documents that match, and return them quickly. Put even more plainly: the query no longer uses LIKE; it becomes the equivalent of name = '继中', which does hit an index and is therefore much faster.
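As a rough sketch of that idea (purely illustrative; the IndexSearcher is a placeholder and the name field simply mirrors the example fields used later in this post), an exact keyword lookup in Lucene is a TermQuery against the term index:

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;

public class TermLookupSketch {

    // Instead of the row-by-row scan behind  name LIKE '%继中%',
    // Lucene resolves the keyword in its term (inverted) index,
    // which behaves like the indexed equality match  name = '继中'.
    public static TopDocs lookup(IndexSearcher searcher) throws IOException {
        // "继中" must match a token produced by the analyzer at index time
        Query query = new TermQuery(new Term("name", "继中"));
        return searcher.search(query, 10); // top 10 matching documents
    }
}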

 

Now for the main topic: configuring Lucene under the Spring framework, Lucene version 3.0.3. Let's go straight to the code, and I will explain what each part does as we go.

 

mvc-config.xml:  

 

   

 

<?xml version="1.0" encoding="UTF-8"?>
<beans xmlns="http://www.springframework.org/schema/beans"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xmlns:mvc="http://www.springframework.org/schema/mvc"
    xmlns:context="http://www.springframework.org/schema/context"
    xmlns:util="http://www.springframework.org/schema/util"
    xsi:schemaLocation="
        http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-3.0.xsd
        http://www.springframework.org/schema/mvc http://www.springframework.org/schema/mvc/spring-mvc-3.0.xsd
        http://www.springframework.org/schema/context http://www.springframework.org/schema/context/spring-context-3.0.xsd
        http://www.springframework.org/schema/util  http://www.springframework.org/schema/util/spring-util-3.0.xsd"
    default-autowire="byName">

    <!-- Spring MVC setup (a String message converter configured for text/plain;charset=UTF-8 responses) -->

    <!-- Lucene beans: myAnalyzer, indexWriter, searcher (see the sketch below) -->

</beans>

That is the Lucene-related configuration in the Spring config file. Looks pretty simple, doesn't it?
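For concreteness, here is a minimal sketch of what those Lucene bean definitions can look like with Lucene 3.0.3. The analyzer class, the index path, and the MaxFieldLength value below are illustrative assumptions, not necessarily the original project's choices; the bean ids are matched to the fields the controller autowires:

    <!-- Analyzer shared by indexing and query parsing; StandardAnalyzer is only an example,
         any Analyzer (for instance a Chinese analyzer) can be plugged in here -->
    <bean id="myAnalyzer" class="org.apache.lucene.analysis.standard.StandardAnalyzer">
        <constructor-arg value="LUCENE_30" />
    </bean>

    <!-- On-disk index directory; the path is a placeholder -->
    <bean id="directory" class="org.apache.lucene.store.SimpleFSDirectory">
        <constructor-arg value="D:/lucene/index" />
    </bean>

    <!-- A single shared IndexWriter for the whole application -->
    <bean id="indexWriter" class="org.apache.lucene.index.IndexWriter">
        <constructor-arg ref="directory" />
        <constructor-arg ref="myAnalyzer" />
        <constructor-arg>
            <util:constant static-field="org.apache.lucene.index.IndexWriter$MaxFieldLength.UNLIMITED" />
        </constructor-arg>
    </bean>

    <!-- IndexSearcher over the same directory; the controller reopens it when the index changes -->
    <bean id="searcher" class="org.apache.lucene.search.IndexSearcher">
        <constructor-arg ref="directory" />
    </bean>

Sharing one IndexWriter this way is the usual pattern, since IndexWriter is thread-safe and expensive to open, and the util namespace declared on the root element is what lets the config reference the MaxFieldLength.UNLIMITED constant.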

 

Let's continue with the code.

 


package com.jizhong.mmmmm.controller;   

  

import java.io.IOException;   

import java.io.StringReader;   

  

import javax.servlet.http.HttpServletRequest;   

  

import org.apache.log4j.Logger;   

import org.apache.lucene.analysis.Analyzer;   

import org.apache.lucene.analysis.TokenStream;   

import org.apache.lucene.analysis.tokenattributes.TermAttribute;   

import org.apache.lucene.document.Document;   

import org.apache.lucene.document.Field;   

import org.apache.lucene.document.Field.Index;   

import org.apache.lucene.document.Field.Store;   

import org.apache.lucene.document.NumericField;   

import org.apache.lucene.index.CorruptIndexException;   

import org.apache.lucene.index.IndexReader;   

import org.apache.lucene.index.IndexWriter;   

import org.apache.lucene.index.Term;   

import org.apache.lucene.queryParser.MultiFieldQueryParser;   

import org.apache.lucene.queryParser.ParseException;   

import org.apache.lucene.queryParser.QueryParser;   

import org.apache.lucene.search.BooleanClause;   

import org.apache.lucene.search.BooleanQuery;   

import org.apache.lucene.search.IndexSearcher;   

import org.apache.lucene.search.Query;   

import org.apache.lucene.search.ScoreDoc;   

import org.apache.lucene.search.Sort;   

import org.apache.lucene.search.SortField;   

import org.apache.lucene.search.TopDocs;   

import org.apache.lucene.util.Version;   

import org.springframework.beans.factory.annotation.Autowired;   

import org.springframework.stereotype.Controller;   

import org.springframework.ui.ModelMap;   

import org.springframework.web.bind.annotation.RequestMapping;   

import org.springframework.web.bind.annotation.RequestMethod;   

  

@Controller   

public class LuceneController {   

  

private static Logger logger = Logger.getLogger(LuceneController.class);   

  

@Autowired(required = false) // required = false so the bean is only pulled in when it is actually needed; without it startup fails when the bean is missing. If you know a better way, please leave a comment.

private Analyzer myAnalyzer;   

@Autowired(required = false)   

private IndexWriter indexWriter;   

@Autowired(required = false)   

private IndexSearcher searcher;   

  

@RequestMapping(value = "search.do", method = RequestMethod.GET)   

public String testsSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {   

search();   

return "test";   

}   

  

@RequestMapping(value = "idSearch.do", method = RequestMethod.GET)   

public String idSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {   

idSearch();   

return "test";   

}   

  

@RequestMapping(value = "moreSearch.do", method = RequestMethod.GET)   

public String moreSearch(HttpServletRequest request, ModelMap modelMap) throws Exception {   

searchMore();   

return "test";   

}   

  

@RequestMapping(value = "create.do", method = RequestMethod.GET)   

public String testsCreate(HttpServletRequest request, ModelMap modelMap) throws Exception {   

create("整形值增加");   

// create(request.getParameter("name"));   

return "test";   

}   

  

@RequestMapping(value = "delete.do", method = RequestMethod.GET)   

public String delete(HttpServletRequest request, ModelMap modelMap) throws Exception {   

delete("id", request.getParameter("id"));   

return "test";   

}   

  

@RequestMapping(value = "optimize.do", method = RequestMethod.GET)   

public String optimize(HttpServletRequest request, ModelMap modelMap) throws Exception {   

indexWriter.optimize(); // optimizes the index; not recommended to call often since it is very time-consuming, optimizing once in a while is enough

return "test";   

}   

// Note on updating a document: although updateDocument is provided, it works as delete-then-add, so you must write out every field even if you only want to change one

@RequestMapping(value = "update.do", method = RequestMethod.GET)   

public String update(HttpServletRequest request, ModelMap modelMap) throws Exception {   

Term term = new Term("id", "1999991");   

Document doc = new Document();   

doc.add(new Field("id", String.valueOf(1999991), Store.YES, Index.NOT_ANALYZED));   

doc.add(new Field("name", 555555 + "555555" + 555555, Store.YES, Index.ANALYZED));   

doc.add(new Field("level1", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));   

doc.add(new Field("level2", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));   

doc.add(new Field("level3", String.valueOf(555555), Store.YES, Index.NOT_ANALYZED));   

doc.add(new Field("brand_id", String.valueOf(555555 + 100000), Store.YES, Index.NOT_ANALYZED));   

indexWriter.updateDocument(term, doc);   

indexWriter.commit(); // any change to the index only takes effect after a commit

return "test";   

}   

// delete: nothing special here

private void delete(String field, String text) throws CorruptIndexException, IOException {   

Term term1 = new Term(field, text);   

indexWriter.deleteDocuments(term1);   

indexWriter.commit();   

}   

  

public void create(String string) throws Exception {   

long begin = System.currentTimeMillis();   

for (int m = 604; m < 605; m++) {   

for (int i = m * 10000; i < (m + 1) * 10000; i++) {   

Document doc = new Document();   

// doc.add(new Field("id", String.valueOf(i), Store.YES, Index.NOT_ANALYZED_NO_NORMS));   

NumericField field = new NumericField("id", 6, Field.Store.YES, false);   

field.setIntValue(i);   

doc.add(field); // not recommended: it is safer to feed every value in as a string regardless of its original type, otherwise type mismatches can make documents unfindable (learned from experience); see the fields below

doc.add(new Field("name", i + string + i, Store.YES, Index.ANALYZED));//对于索引战略,主张需求含糊查询字段进行分词战略,其他则不分词   

doc.add(new Field("level1", String.valueOf(3), Store.YES, Index.NOT_ANALYZED_NO_NORMS));   

doc.add(new Field("level2", String.valueOf(2), Store.YES, Index.NOT_ANALYZED_NO_NORMS));   

doc.add(new Field("level3", String.valueOf(1), Store.YES, Index.NOT_ANALYZED_NO_NORMS));   

doc.add(new Field("brand_id", String.valueOf(i + 100000), Store.YES, Index.NOT_ANALYZED_NO_NORMS));   

doc.add(new Field("hehe", String.valueOf(i + 100000), Store.YES, Index.NOT_ANALYZED_NO_NORMS));   

indexWriter.addDocument(doc);   

}   

System.out.println(m);   

}   

indexWriter.commit();   

System.out.println("create cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");   

}   

  

// This query means: find documents whose name field contains the keyword "整形" and whose level3 field equals 1; the two conditions are combined with AND

public void search() throws Exception {   

long begin = System.currentTimeMillis();   

String[] queryString = { "整形", "1" };//留意字段与值要一一对应哦,同下   

String[] fields = { "name", "level3" };////留意字段与值要一一对应哦,同上   

BooleanClause.Occur[] clauses = { BooleanClause.Occur.MUST, BooleanClause.Occur.MUST }; // MUST + MUST gives the AND relationship; see the docs for other combinations

Query query = MultiFieldQueryParser.parse(Version.LUCENE_30, queryString, fields, clauses, myAnalyzer);   

IndexReader readerNow = searcher.getIndexReader();   

// This check is important: when data has just been written we still want it to show up in queries, but the existing reader does not see the new changes, so reopen it here before searching

if (!readerNow.isCurrent()) {   

searcher = new IndexSearcher(readerNow.reopen());   

}   

System.out.println(searcher.maxDoc());   

Sort sort = new Sort();   

sort.setSort(new SortField("id", SortField.INT, true));   

TopDocs topDocs = searcher.search(query, null, 53, sort); // search with a sort strategy

// TopDocs topDocs = searcher.search(query, 50);   

for (ScoreDoc scoreDoc : topDocs.scoreDocs) {   

Document doc = searcher.doc(scoreDoc.doc);   

System.out.println("id:" + doc.get("id"));   

System.out.println("name:" + doc.get("name"));   

System.out.println("level3:" + doc.get("level3"));   

System.out.println("new field:" + doc.get("hehe"));   

}   

System.out.println("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");   

}   

  

private void idSearch() throws ParseException, CorruptIndexException, IOException {   

long begin = System.currentTimeMillis();   

QueryParser qp = new QueryParser(Version.LUCENE_30, "id", myAnalyzer);   

  

Query query = qp.parse("4040011");   

IndexReader readerNow = searcher.getIndexReader();   

if (!readerNow.isCurrent()) {   

searcher = new IndexSearcher(readerNow.reopen());   

}   

TopDocs topDocs = searcher.search(query, null, 53);   

for (ScoreDoc scoreDoc : topDocs.scoreDocs) {   

Document doc = searcher.doc(scoreDoc.doc);   

System.out.println("id:" + doc.get("id"));   

System.out.println("name:" + doc.get("name"));   

System.out.println("level3:" + doc.get("level3"));   

System.out.println("new field:" + doc.get("hehe"));   

 


}   

System.out.println("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");   

}   

  

public void searchMore() throws Exception {   

long begin = System.currentTimeMillis();   

String[] queryStringOne = { "kkk", "222222" };   

String[] queryStringTwo = { "99980", "222222" };   

String[] fields = { "name", "level2" };   

BooleanClause.Occur[] clauses = { BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD };   

Query queryOne = MultiFieldQueryParser.parse(Version.LUCENE_30, queryStringOne, fields, clauses, myAnalyzer);   

Query queryTwo = MultiFieldQueryParser.parse(Version.LUCENE_30, queryStringTwo, fields, clauses, myAnalyzer);   

BooleanQuery booleanQuery = new BooleanQuery();   

booleanQuery.add(queryOne, BooleanClause.Occur.MUST);   

booleanQuery.add(queryTwo, BooleanClause.Occur.MUST);   

IndexReader readerNow = searcher.getIndexReader();   

if (!readerNow.isCurrent()) {   

searcher = new IndexSearcher(readerNow.reopen());   

}   

System.out.println(searcher.maxDoc());   

Sort sort = new Sort();   

sort.setSort(new SortField("id", SortField.INT, true));   

TopDocs topDocs = searcher.search(booleanQuery, null, 53, sort);   

// TopDocs topDocs = searcher.search(query, 50);   

for (ScoreDoc scoreDoc : topDocs.scoreDocs) {   

Document doc = searcher.doc(scoreDoc.doc);   

System.out.println("id:" + doc.get("id"));   

System.out.println("name:" + doc.get("name"));   

System.out.println("level3:" + doc.get("level3"));   

System.out.println("new field:" + doc.get("hehe"));   

}   

System.out.println("search cost:" + (System.currentTimeMillis() - begin) / 1000 + "s");   

}   

  

@RequestMapping(value = "result.do", method = RequestMethod.GET)   

public void getAnalyzerResult() throws IOException {   

StringReader reader = new StringReader("爱国者mp3");   

TokenStream ts = myAnalyzer.tokenStream("name", reader);   

ts.addAttribute(TermAttribute.class);   

while (ts.incrementToken()) {   

TermAttribute ta = ts.getAttribute(TermAttribute.class);   

System.out.println(ta.term());   

}   

}   

  


}   
