/*
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements.  See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership.  The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied.  See the License for the
    specific language governing permissions and limitations
    under the License.
 */
package org.apache.wiki.search;

import org.apache.commons.lang3.StringUtils;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.wiki.InternalWikiException;
import org.apache.wiki.WatchDog;
import org.apache.wiki.WikiBackgroundThread;
import org.apache.wiki.WikiContext;
import org.apache.wiki.WikiEngine;
import org.apache.wiki.WikiPage;
import org.apache.wiki.WikiProvider;
import org.apache.wiki.api.exceptions.NoRequiredPropertyException;
import org.apache.wiki.api.exceptions.ProviderException;
import org.apache.wiki.attachment.Attachment;
import org.apache.wiki.attachment.AttachmentManager;
import org.apache.wiki.auth.AuthorizationManager;
import org.apache.wiki.auth.permissions.PagePermission;
import org.apache.wiki.parser.MarkupParser;
import org.apache.wiki.providers.WikiPageProvider;
import org.apache.wiki.util.ClassUtil;
import org.apache.wiki.util.FileUtil;
import org.apache.wiki.util.TextUtil;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.io.StringWriter;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;


/**
 *  Lucene-based search provider that handles searching the Wiki.
 *
 *  @since 2.2.21.
 */
public class LuceneSearchProvider implements SearchProvider {

    protected static final Logger log = Logger.getLogger(LuceneSearchProvider.class);

    private WikiEngine m_engine;
    private Executor searchExecutor;

    // Lucene properties.

    /** Which analyzer to use.  Default is StandardAnalyzer. */
    public static final String PROP_LUCENE_ANALYZER = "jspwiki.lucene.analyzer";

    private static final String PROP_LUCENE_INDEXDELAY   = "jspwiki.lucene.indexdelay";
    private static final String PROP_LUCENE_INITIALDELAY = "jspwiki.lucene.initialdelay";

    private String m_analyzerClass = "org.apache.lucene.analysis.standard.ClassicAnalyzer";

    private static final String LUCENE_DIR = "lucene";

    /** These attachment file suffixes will be indexed. */
    public static final String[] SEARCHABLE_FILE_SUFFIXES = new String[] { ".txt", ".ini", ".xml", ".html", ".htm", ".mm",
                                                                           ".xhtml", ".java", ".c", ".cpp", ".php", ".asm", ".sh",
                                                                           ".properties", ".kml", ".gpx", ".loc", ".md" };

    protected static final String LUCENE_ID            = "id";
    protected static final String LUCENE_PAGE_CONTENTS = "contents";
    protected static final String LUCENE_AUTHOR        = "author";
    protected static final String LUCENE_ATTACHMENTS   = "attachment";
    protected static final String LUCENE_PAGE_NAME     = "name";
    protected static final String LUCENE_PAGE_KEYWORDS = "keywords";

    private String           m_luceneDirectory;
    protected List<Object[]> m_updates = Collections.synchronizedList( new ArrayList<>() );

    /** Maximum number of fragments from search matches. */
    private static final int MAX_FRAGMENTS = 3;

    /** The maximum number of hits to return from searches. */
    public static final int MAX_SEARCH_HITS = 99_999;

    private static String c_punctuationSpaces = StringUtils.repeat(" ", MarkupParser.PUNCTUATION_CHARS_ALLOWED.length() );

    /**
     *  {@inheritDoc}
     */
    @Override
    public void initialize(WikiEngine engine, Properties props)
            throws NoRequiredPropertyException, IOException
    {
        m_engine = engine;
        searchExecutor = Executors.newCachedThreadPool();

        m_luceneDirectory = engine.getWorkDir()+File.separator+LUCENE_DIR;

        int initialDelay = TextUtil.getIntegerProperty( props, PROP_LUCENE_INITIALDELAY, LuceneUpdater.INITIAL_DELAY );
        int indexDelay   = TextUtil.getIntegerProperty( props, PROP_LUCENE_INDEXDELAY, LuceneUpdater.INDEX_DELAY );

        m_analyzerClass = TextUtil.getStringProperty( props, PROP_LUCENE_ANALYZER, m_analyzerClass );
        // FIXME: Just to be simple for now, we will do full reindex
        // only if no files are in lucene directory.

        File dir = new File(m_luceneDirectory);

        log.info("Lucene enabled, cache will be in: "+dir.getAbsolutePath());

        try
        {
            if( !dir.exists() )
            {
                dir.mkdirs();
            }

            if( !dir.exists() || !dir.canWrite() || !dir.canRead() )
            {
                log.error("Cannot write to Lucene directory, disabling Lucene: "+dir.getAbsolutePath());
                throw new IOException( "Invalid Lucene directory." );
            }

            String[] filelist = dir.list();

            if( filelist == null )
            {
                throw new IOException( "Invalid Lucene directory: cannot produce listing: "+dir.getAbsolutePath());
            }
        }
        catch ( IOException e )
        {
            log.error("Problem while creating Lucene index - not using Lucene.", e);
        }

        // Start the Lucene update thread, which waits first
        // for a little while before starting to go through
        // the Lucene "pages that need updating".
        LuceneUpdater updater = new LuceneUpdater( m_engine, this, initialDelay, indexDelay );
        updater.start();
    }

    /**
     *  Returns the handling engine.
     *
     *  @return Current WikiEngine
     */
    protected WikiEngine getEngine()
    {
        return m_engine;
    }

    /**
     *  Performs a full Lucene reindex, if necessary.
     *
     *  @throws IOException If there's a problem during indexing
     */
    protected void doFullLuceneReindex() throws IOException {
        File dir = new File(m_luceneDirectory);

        String[] filelist = dir.list();

        if( filelist == null ) {
            throw new IOException( "Invalid Lucene directory: cannot produce listing: "+dir.getAbsolutePath());
        }

        try {
            if( filelist.length == 0 ) {
                //
                //  No files?  Reindex!
                //
                Date start = new Date();

                log.info("Starting Lucene reindexing, this can take a couple of minutes...");

                Directory luceneDir = new SimpleFSDirectory( dir.toPath() );
                try( IndexWriter writer = getIndexWriter( luceneDir ) )
                {
                    Collection< WikiPage > allPages = m_engine.getPageManager().getAllPages();
                    for( WikiPage page : allPages ) {

                        try {
                            String text = m_engine.getPageManager().getPageText( page.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( page, text, writer );
                        } catch( IOException e ) {
                            log.warn( "Unable to index page " + page.getName() + ", continuing to next ", e );
                        }
                    }

                    Collection< Attachment > allAttachments = m_engine.getAttachmentManager().getAllAttachments();
                    for( Attachment att : allAttachments ) {
                        try {
                            String text = getAttachmentContent( att.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( att, text, writer );
                        } catch( IOException e ) {
                            log.warn( "Unable to index attachment " + att.getName() + ", continuing to next", e );
                        }
                    }

                }

                Date end = new Date();
                log.info( "Full Lucene index finished in " + (end.getTime() - start.getTime()) + " milliseconds." );
            } else {
                log.info("Files found in Lucene directory, not reindexing.");
            }
        } catch ( IOException e ) {
            log.error("Problem while creating Lucene index - not using Lucene.", e);
        } catch ( ProviderException e ) {
            log.error("Problem reading pages while creating Lucene index (JSPWiki won't start.)", e);
            throw new IllegalArgumentException("unable to create Lucene index");
        } catch( Exception e ) {
            log.error("Unable to start lucene",e);
        }

    }

    /**
     *  Fetches the attachment content from the repository.
     *  Content is flat text that can be used for indexing/searching or display
     *
     *  @param attachmentName Name of the attachment.
     *  @param version The version of the attachment.
     *
     *  @return the content of the Attachment as a String.
     */
    protected String getAttachmentContent( String attachmentName, int version )
    {
        AttachmentManager mgr = m_engine.getAttachmentManager();

        try
        {
            Attachment att = mgr.getAttachmentInfo( attachmentName, version );
            //FIXME: Find out why sometimes att is null
            if(att != null)
            {
                return getAttachmentContent( att );
            }
        }
        catch (ProviderException e)
        {
            log.error("Attachment cannot be loaded", e);
        }
        // Something was wrong, no result is returned.
        return null;
    }

    /**
     *  @param att Attachment to get content for. Filename extension is used to determine the type of the attachment.
     *  @return String representing the content of the file.
     *  FIXME This is a very simple implementation of some text-based attachment, mainly used for testing.
     *  This should be replaced /moved to Attachment search providers or some other 'pluggable' way to search attachments
     */
    protected String getAttachmentContent( Attachment att )
    {
        AttachmentManager mgr = m_engine.getAttachmentManager();
        //FIXME: Add attachment plugin structure

        String filename = att.getFileName();

        boolean searchSuffix = false;
        for( String suffix : SEARCHABLE_FILE_SUFFIXES )
        {
            if( filename.endsWith( suffix ) )
            {
                searchSuffix = true;
            }
        }

        String out = filename;
        if( searchSuffix )
        {
            try( final InputStream attStream = mgr.getAttachmentStream( att ); final StringWriter sout = new StringWriter() ) {
                FileUtil.copyContents( new InputStreamReader( attStream ), sout );
                out = out + " " + sout.toString();
            } catch( ProviderException | IOException e ) {
                log.error("Attachment cannot be loaded", e);
            }
        }

        return out;
    }

    /**
     *  Updates the lucene index for a single page.
     *
     *  @param page The WikiPage to check
     *  @param text The page text to index.
     */
    protected synchronized void updateLuceneIndex( final WikiPage page, final String text ) {
        log.debug("Updating Lucene index for page '" + page.getName() + "'...");
        pageRemoved( page );

        // Now add back the new version.
        try( final Directory luceneDir = new SimpleFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            luceneIndexPage( page, text, writer );
        } catch( final IOException e ) {
            log.error("Unable to update page '" + page.getName() + "' from Lucene index", e);
            // reindexPage( page );
        } catch( final Exception e ) {
            log.error("Unexpected Lucene exception - please check configuration!",e);
            // reindexPage( page );
        }

        log.debug("Done updating Lucene index for page '" + page.getName() + "'.");
    }


    private Analyzer getLuceneAnalyzer() throws ProviderException
    {
        try
        {
            Class< ? > clazz = ClassUtil.findClass( "", m_analyzerClass );
            Constructor< ? > constructor = clazz.getConstructor();
            Analyzer analyzer = (Analyzer) constructor.newInstance();
            return analyzer;
        }
        catch( Exception e )
        {
            String msg = "Could not get LuceneAnalyzer class " + m_analyzerClass + ", reason: ";
            log.error( msg, e );
            throw new ProviderException( msg + e );
        }
    }

    /**
     *  Indexes page using the given IndexWriter.
     *
     *  @param page WikiPage
     *  @param text Page text to index
     *  @param writer The Lucene IndexWriter to use for indexing
     *  @return the created index Document
     *  @throws IOException If there's an indexing problem
     */
    protected Document luceneIndexPage( final WikiPage page, final String text, final IndexWriter writer ) throws IOException {
        if( log.isDebugEnabled() ) {
            log.debug( "Indexing " + page.getName() + "..." );
        }

        // make a new, empty document
        final Document doc = new Document();

        if( text == null ) {
            return doc;
        }
        final String indexedText = text.replace( "__", " " ); // be nice to Language Analyzers - cfr. JSPWIKI-893

        // Raw name is the keyword we'll use to refer to this document for updates.
        Field field = new Field( LUCENE_ID, page.getName(), StringField.TYPE_STORED );
        doc.add( field );

        // Body text.  It is stored in the doc for search contexts.
        field = new Field( LUCENE_PAGE_CONTENTS, indexedText, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by page name. Both beautified and raw
        final String unTokenizedTitle = StringUtils.replaceChars( page.getName(),
                                                                  MarkupParser.PUNCTUATION_CHARS_ALLOWED,
                                                                  c_punctuationSpaces );

        field = new Field( LUCENE_PAGE_NAME,
                           TextUtil.beautifyString( page.getName() ) + " " + unTokenizedTitle,
                           TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by authorname
        if( page.getAuthor() != null ) {
            field = new Field( LUCENE_AUTHOR, page.getAuthor(), TextField.TYPE_STORED );
            doc.add( field );
        }

        // Now add the names of the attachments of this page
        try {
            final List< Attachment > attachments = m_engine.getAttachmentManager().listAttachments( page );
            String attachmentNames = "";

            for( final Attachment att : attachments ) {
                attachmentNames += att.getName() + ";";
            }
            field = new Field( LUCENE_ATTACHMENTS, attachmentNames, TextField.TYPE_STORED );
            doc.add( field );

        } catch( final ProviderException e ) {
            // Unable to read attachments
            log.error( "Failed to get attachments for page", e );
        }

        // also index page keywords, if available
        if( page.getAttribute( "keywords" ) != null ) {
            field = new Field( LUCENE_PAGE_KEYWORDS, page.getAttribute( "keywords" ).toString(), TextField.TYPE_STORED );
            doc.add( field );
        }
        writer.addDocument(doc);

        return doc;
    }

    /**
     *  {@inheritDoc}
     */
    @Override
    public void pageRemoved( final WikiPage page ) {
        try( final Directory luceneDir = new SimpleFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            final Query query = new TermQuery( new Term( LUCENE_ID, page.getName() ) );
            writer.deleteDocuments( query );
        } catch ( final Exception e ) {
            log.error("Unable to remove page '" + page.getName() + "' from Lucene index", e);
        }
    }

    IndexWriter getIndexWriter( Directory luceneDir ) throws IOException, ProviderException {
        IndexWriterConfig writerConfig = new IndexWriterConfig( getLuceneAnalyzer() );
        writerConfig.setOpenMode( OpenMode.CREATE_OR_APPEND );
        IndexWriter writer = new IndexWriter( luceneDir, writerConfig );

        // writer.setInfoStream( System.out );
        return writer;
    }

    /**
     *  Adds a page-text pair to the lucene update queue.  Safe to call always
     *
     *  @param page WikiPage to add to the update queue.
     */
    @Override
    public void reindexPage( WikiPage page ) {
        if( page != null ) {
            String text;

            // TODO: Think if this was better done in the thread itself?

            if( page instanceof Attachment ) {
                text = getAttachmentContent( (Attachment) page );
            } else {
                text = m_engine.getPureText( page );
            }

            if( text != null ) {
                // Add work item to m_updates queue.
                Object[] pair = new Object[2];
                pair[0] = page;
                pair[1] = text;
                m_updates.add(pair);
                log.debug("Scheduling page " + page.getName() + " for index update");
            }
        }
    }

    /**
     *  {@inheritDoc}
     */
    @Override
    public Collection< SearchResult > findPages( String query, WikiContext wikiContext ) throws ProviderException {
        return findPages( query, FLAG_CONTEXTS, wikiContext );
    }

    /**
     *  Create contexts also.  Generating contexts can be expensive,
     *  so they're not on by default.
     */
    public static final int FLAG_CONTEXTS = 0x01;

    /**
     *  Searches pages using a particular combination of flags.
     *
     *  @param query The query to perform in Lucene query language
     *  @param flags A set of flags
     *  @param wikiContext The WikiContext of the current request, used for permission checks
     *  @return A Collection of SearchResult instances
     *  @throws ProviderException if there is a problem with the backend
     */
    public Collection< SearchResult > findPages( final String query, final int flags, final WikiContext wikiContext ) throws ProviderException {
        ArrayList<SearchResult> list = null;
        Highlighter highlighter = null;

        try( final Directory luceneDir = new SimpleFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexReader reader = DirectoryReader.open( luceneDir ) ) {
            final String[] queryfields = { LUCENE_PAGE_CONTENTS, LUCENE_PAGE_NAME, LUCENE_AUTHOR, LUCENE_ATTACHMENTS, LUCENE_PAGE_KEYWORDS };
            final QueryParser qp = new MultiFieldQueryParser( queryfields, getLuceneAnalyzer() );
            final Query luceneQuery = qp.parse( query );
            final IndexSearcher searcher = new IndexSearcher( reader, searchExecutor );

            if( (flags & FLAG_CONTEXTS) != 0 ) {
                highlighter = new Highlighter(new SimpleHTMLFormatter("<span class=\"searchmatch\">", "</span>"),
                                              new SimpleHTMLEncoder(),
                                              new QueryScorer(luceneQuery));
            }

            final ScoreDoc[] hits = searcher.search(luceneQuery, MAX_SEARCH_HITS).scoreDocs;
            final AuthorizationManager mgr = m_engine.getAuthorizationManager();

            list = new ArrayList<>(hits.length);
            for ( int curr = 0; curr < hits.length; curr++ ) {
                int docID = hits[curr].doc;
                Document doc = searcher.doc( docID );
                String pageName = doc.get(LUCENE_ID);
                WikiPage page = m_engine.getPage(pageName, WikiPageProvider.LATEST_VERSION);

                if( page != null ) {
                    if( page instanceof Attachment ) {
                        // Currently attachments don't look nice on the search-results page
                        // When the search-results are cleaned up this can be enabled again.
                    }

                    final PagePermission pp = new PagePermission( page, PagePermission.VIEW_ACTION );
                    if( mgr.checkPermission( wikiContext.getWikiSession(), pp ) ) {
                        final int score = (int)(hits[curr].score * 100);

                        // Get highlighted search contexts
                        final String text = doc.get(LUCENE_PAGE_CONTENTS);

                        String[] fragments = new String[0];
                        if( text != null && highlighter != null ) {
                            TokenStream tokenStream = getLuceneAnalyzer().tokenStream(LUCENE_PAGE_CONTENTS, new StringReader(text));
                            fragments = highlighter.getBestFragments(tokenStream, text, MAX_FRAGMENTS);
                        }

                        final SearchResult result = new SearchResultImpl( page, score, fragments );
                        list.add(result);
                    }
                } else {
                    log.error("Lucene found a result page '" + pageName + "' that could not be loaded, removing from Lucene cache");
                    pageRemoved(new WikiPage( m_engine, pageName ));
                }
            }
        } catch( final IOException e ) {
            log.error("Failed during lucene search",e);
        } catch( final ParseException e ) {
            log.info("Broken query; cannot parse query: " + query, e);
            throw new ProviderException( "You have entered a query Lucene cannot process [" + query + "]: " + e.getMessage() );
        } catch( final InvalidTokenOffsetsException e ) {
            log.error("Tokens are incompatible with provided text ",e);
        }

        return list;
    }

    /**
     *  {@inheritDoc}
     */
    @Override
    public String getProviderInfo()
    {
        return "LuceneSearchProvider";
    }

    /**
     * Updater thread that updates Lucene indexes.
     */
    private static final class LuceneUpdater extends WikiBackgroundThread
    {
        protected static final int INDEX_DELAY   = 5;
        protected static final int INITIAL_DELAY = 60;

        private final LuceneSearchProvider m_provider;

        private int m_initialDelay;

        private WatchDog m_watchdog;

        private LuceneUpdater( WikiEngine engine, LuceneSearchProvider provider,
                               int initialDelay, int indexDelay )
        {
            super( engine, indexDelay );
            m_provider = provider;
            m_initialDelay = initialDelay;
            setName("JSPWiki Lucene Indexer");
        }

        @Override
        public void startupTask() throws Exception
        {
            m_watchdog = getEngine().getCurrentWatchDog();

            // Sleep initially...
            try
            {
                Thread.sleep( m_initialDelay * 1000L );
            }
            catch( InterruptedException e )
            {
                throw new InternalWikiException("Interrupted while waiting to start.", e);
            }

            m_watchdog.enterState("Full reindex");
            // Reindex everything
            m_provider.doFullLuceneReindex();
            m_watchdog.exitState();
        }

        @Override
        public void backgroundTask() throws Exception
        {
            m_watchdog.enterState("Emptying index queue", 60);

            synchronized ( m_provider.m_updates )
            {
                while( m_provider.m_updates.size() > 0 )
                {
                    Object[] pair = m_provider.m_updates.remove(0);

                    WikiPage page = ( WikiPage ) pair[0];
                    String text = ( String ) pair[1];
                    m_provider.updateLuceneIndex(page, text);
                }
            }

            m_watchdog.exitState();
        }

    }

    // FIXME: This class is dumb; needs to have a better implementation
    private static class SearchResultImpl
        implements SearchResult
    {
        private WikiPage m_page;
        private int      m_score;
        private String[] m_contexts;

        public SearchResultImpl( WikiPage page, int score, String[] contexts )
        {
            m_page     = page;
            m_score    = score;
            m_contexts = contexts != null ? contexts.clone() : null;
        }

        @Override
        public WikiPage getPage()
        {
            return m_page;
        }

        /* (non-Javadoc)
         * @see org.apache.wiki.SearchResult#getScore()
         */
        @Override
        public int getScore()
        {
            return m_score;
        }


        @Override
        public String[] getContexts()
        {
            return m_contexts;
        }
    }
}