001/* 002 Licensed to the Apache Software Foundation (ASF) under one 003 or more contributor license agreements. See the NOTICE file 004 distributed with this work for additional information 005 regarding copyright ownership. The ASF licenses this file 006 to you under the Apache License, Version 2.0 (the 007 "License"); you may not use this file except in compliance 008 with the License. You may obtain a copy of the License at 009 010 http://www.apache.org/licenses/LICENSE-2.0 011 012 Unless required by applicable law or agreed to in writing, 013 software distributed under the License is distributed on an 014 "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 015 KIND, either express or implied. See the License for the 016 specific language governing permissions and limitations 017 under the License. 018 */ 019package org.apache.wiki.search; 020 021import org.apache.commons.lang3.StringUtils; 022import org.apache.logging.log4j.LogManager; 023import org.apache.logging.log4j.Logger; 024import org.apache.lucene.analysis.Analyzer; 025import org.apache.lucene.analysis.TokenStream; 026import org.apache.lucene.analysis.classic.ClassicAnalyzer; 027import org.apache.lucene.document.Document; 028import org.apache.lucene.document.Field; 029import org.apache.lucene.document.StringField; 030import org.apache.lucene.document.TextField; 031import org.apache.lucene.index.DirectoryReader; 032import org.apache.lucene.index.IndexReader; 033import org.apache.lucene.index.IndexWriter; 034import org.apache.lucene.index.IndexWriterConfig; 035import org.apache.lucene.index.IndexWriterConfig.OpenMode; 036import org.apache.lucene.index.Term; 037import org.apache.lucene.queryparser.classic.MultiFieldQueryParser; 038import org.apache.lucene.queryparser.classic.ParseException; 039import org.apache.lucene.queryparser.classic.QueryParser; 040import org.apache.lucene.search.IndexSearcher; 041import org.apache.lucene.search.Query; 042import org.apache.lucene.search.ScoreDoc; 043import 
org.apache.lucene.search.TermQuery; 044import org.apache.lucene.search.highlight.Highlighter; 045import org.apache.lucene.search.highlight.InvalidTokenOffsetsException; 046import org.apache.lucene.search.highlight.QueryScorer; 047import org.apache.lucene.search.highlight.SimpleHTMLEncoder; 048import org.apache.lucene.search.highlight.SimpleHTMLFormatter; 049import org.apache.lucene.store.Directory; 050import org.apache.lucene.store.NIOFSDirectory; 051import org.apache.wiki.InternalWikiException; 052import org.apache.wiki.WatchDog; 053import org.apache.wiki.WikiBackgroundThread; 054import org.apache.wiki.api.core.Attachment; 055import org.apache.wiki.api.core.Context; 056import org.apache.wiki.api.core.Engine; 057import org.apache.wiki.api.core.Page; 058import org.apache.wiki.api.exceptions.NoRequiredPropertyException; 059import org.apache.wiki.api.exceptions.ProviderException; 060import org.apache.wiki.api.providers.PageProvider; 061import org.apache.wiki.api.providers.WikiProvider; 062import org.apache.wiki.api.search.SearchResult; 063import org.apache.wiki.api.spi.Wiki; 064import org.apache.wiki.attachment.AttachmentManager; 065import org.apache.wiki.auth.AuthorizationManager; 066import org.apache.wiki.auth.permissions.PagePermission; 067import org.apache.wiki.pages.PageManager; 068import org.apache.wiki.util.ClassUtil; 069import org.apache.wiki.util.FileUtil; 070import org.apache.wiki.util.TextUtil; 071 072import java.io.File; 073import java.io.IOException; 074import java.io.InputStream; 075import java.io.InputStreamReader; 076import java.io.StringReader; 077import java.io.StringWriter; 078import java.util.ArrayList; 079import java.util.Arrays; 080import java.util.Collection; 081import java.util.Collections; 082import java.util.Date; 083import java.util.List; 084import java.util.Properties; 085import java.util.concurrent.Executor; 086import java.util.concurrent.Executors; 087import java.util.stream.Collectors; 088 089 090/** 091 * Interface for the search 
providers that handle searching the Wiki.
 *
 * @since 2.2.21.
 */
public class LuceneSearchProvider implements SearchProvider {

    protected static final Logger LOG = LogManager.getLogger( LuceneSearchProvider.class );

    private Engine m_engine;
    private Executor searchExecutor;

    // Lucene properties.

    /** Which analyzer to use. Default is StandardAnalyzer. */
    public static final String PROP_LUCENE_ANALYZER = "jspwiki.lucene.analyzer";
    private static final String PROP_LUCENE_INDEXDELAY = "jspwiki.lucene.indexdelay";
    private static final String PROP_LUCENE_INITIALDELAY = "jspwiki.lucene.initialdelay";

    private String m_analyzerClass = ClassicAnalyzer.class.getName();

    private static final String LUCENE_DIR = "lucene";

    /**
     * These attachment file suffixes will be indexed.
     * NOTE(review): the previous list contained a bare {@code "htm"} (no leading dot, so it matched any
     * filename merely ending in the letters "htm") and duplicated {@code ".htm"} / {@code ".xml"} entries;
     * the list is now de-duplicated and dot-prefixed throughout.
     */
    public static final String[] SEARCHABLE_FILE_SUFFIXES = new String[] { ".txt", ".ini", ".xml", ".html", ".htm", ".mm",
                                                                           ".xhtml", ".java", ".c", ".cpp", ".php", ".asm", ".sh",
                                                                           ".properties", ".kml", ".gpx", ".loc", ".md" };

    protected static final String LUCENE_ID = "id";
    protected static final String LUCENE_PAGE_CONTENTS = "contents";
    protected static final String LUCENE_AUTHOR = "author";
    protected static final String LUCENE_ATTACHMENTS = "attachment";
    protected static final String LUCENE_PAGE_NAME = "name";
    protected static final String LUCENE_PAGE_KEYWORDS = "keywords";

    private String m_luceneDirectory;

    /** Work queue of (Page, String text) pairs awaiting indexing; drained by {@link LuceneUpdater}. */
    protected final List< Object[] > m_updates = Collections.synchronizedList( new ArrayList<>() );

    /** Maximum number of fragments from search matches. */
    private static final int MAX_FRAGMENTS = 3;

    /** The maximum number of hits to return from searches. */
    public static final int MAX_SEARCH_HITS = 99_999;

    private static final String PUNCTUATION_TO_SPACES = StringUtils.repeat( " ", TextUtil.PUNCTUATION_CHARS_ALLOWED.length() );

    /** {@inheritDoc} */
    @Override
    public void initialize( final Engine engine, final Properties props ) throws NoRequiredPropertyException, IOException {
        m_engine = engine;
        searchExecutor = Executors.newCachedThreadPool();

        m_luceneDirectory = engine.getWorkDir() + File.separator + LUCENE_DIR;

        final int initialDelay = TextUtil.getIntegerProperty( props, PROP_LUCENE_INITIALDELAY, LuceneUpdater.INITIAL_DELAY );
        final int indexDelay = TextUtil.getIntegerProperty( props, PROP_LUCENE_INDEXDELAY, LuceneUpdater.INDEX_DELAY );

        m_analyzerClass = TextUtil.getStringProperty( props, PROP_LUCENE_ANALYZER, m_analyzerClass );
        // FIXME: Just to be simple for now, we will do full reindex only if no files are in lucene directory.

        final File dir = new File( m_luceneDirectory );
        LOG.info( "Lucene enabled, cache will be in: {}", dir.getAbsolutePath() );
        try {
            if( !dir.exists() ) {
                dir.mkdirs();
            }

            if( !dir.exists() || !dir.canWrite() || !dir.canRead() ) {
                LOG.error( "Cannot write to Lucene directory, disabling Lucene: {}", dir.getAbsolutePath() );
                throw new IOException( "Invalid Lucene directory." );
            }

            final String[] filelist = dir.list();
            if( filelist == null ) {
                throw new IOException( "Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath() );
            }
        } catch( final IOException e ) {
            // Deliberately non-fatal: Lucene setup failure is logged and the wiki keeps running without search.
            LOG.error( "Problem while creating Lucene index - not using Lucene.", e );
        }

        // Start the Lucene update thread, which waits first for a little while before starting to go through
        // the Lucene "pages that need updating".
        final LuceneUpdater updater = new LuceneUpdater( m_engine, this, initialDelay, indexDelay );
        updater.start();
    }

    /**
     * Returns the handling engine.
     *
     * @return Current Engine
     */
    protected Engine getEngine() {
        return m_engine;
    }

    /**
     * Performs a full Lucene reindex, if necessary. Reindexing happens only when the Lucene
     * directory contains no files at all; otherwise the existing index is kept.
     *
     * @throws IOException If there's a problem during indexing
     */
    protected void doFullLuceneReindex() throws IOException {
        final File dir = new File( m_luceneDirectory );
        final String[] filelist = dir.list();
        if( filelist == null ) {
            throw new IOException( "Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath() );
        }

        try {
            if( filelist.length == 0 ) {
                //
                //  No files?  Reindex!
                //
                final Date start = new Date();

                LOG.info( "Starting Lucene reindexing, this can take a couple of minutes..." );

                final Directory luceneDir = new NIOFSDirectory( dir.toPath() );
                try( final IndexWriter writer = getIndexWriter( luceneDir ) ) {
                    long pagesIndexed = 0L;
                    final Collection< Page > allPages = m_engine.getManager( PageManager.class ).getAllPages();
                    for( final Page page : allPages ) {
                        try {
                            final String text = m_engine.getManager( PageManager.class ).getPageText( page.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( page, text, writer );
                            pagesIndexed++;
                        } catch( final IOException e ) {
                            LOG.warn( "Unable to index page {}, continuing to next ", page.getName(), e );
                        }
                    }
                    LOG.info( "Indexed {} pages", pagesIndexed );

                    long attachmentsIndexed = 0L;
                    final Collection< Attachment > allAttachments = m_engine.getManager( AttachmentManager.class ).getAllAttachments();
                    for( final Attachment att : allAttachments ) {
                        try {
                            final String text = getAttachmentContent( att.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( att, text, writer );
                            attachmentsIndexed++;
                        } catch( final IOException e ) {
                            LOG.warn( "Unable to index attachment {}, continuing to next", att.getName(), e );
                        }
                    }
                    LOG.info( "Indexed {} attachments", attachmentsIndexed );
                }

                final Date end = new Date();
                LOG.info( "Full Lucene index finished in {} milliseconds.", end.getTime() - start.getTime() );
            } else {
                LOG.info( "Files found in Lucene directory, not reindexing." );
            }
        } catch( final IOException e ) {
            LOG.error( "Problem while creating Lucene index - not using Lucene.", e );
        } catch( final ProviderException e ) {
            LOG.error( "Problem reading pages while creating Lucene index (JSPWiki won't start.)", e );
            throw new IllegalArgumentException( "unable to create Lucene index" );
        } catch( final Exception e ) {
            LOG.error( "Unable to start lucene", e );
        }

    }

    /**
     * Fetches the attachment content from the repository.
     * Content is flat text that can be used for indexing/searching or display
     *
     * @param attachmentName Name of the attachment.
     * @param version The version of the attachment.
     * @return the content of the Attachment as a String, or {@code null} if the attachment could not be loaded.
     */
    protected String getAttachmentContent( final String attachmentName, final int version ) {
        final AttachmentManager mgr = m_engine.getManager( AttachmentManager.class );
        try {
            final Attachment att = mgr.getAttachmentInfo( attachmentName, version );
            //FIXME: Find out why sometimes att is null
            if( att != null ) {
                return getAttachmentContent( att );
            }
        } catch( final ProviderException e ) {
            LOG.error( "Attachment cannot be loaded", e );
        }
        return null;
    }

    /**
     * @param att Attachment to get content for. Filename extension is used to determine the type of the attachment.
     * @return String representing the content of the file.
     * FIXME This is a very simple implementation of some text-based attachment, mainly used for testing.
     * This should be replaced /moved to Attachment search providers or some other 'pluggable' way to search attachments
     */
    protected String getAttachmentContent( final Attachment att ) {
        final AttachmentManager mgr = m_engine.getManager( AttachmentManager.class );
        //FIXME: Add attachment plugin structure

        final String filename = att.getFileName();

        final boolean searchSuffix = Arrays.stream( SEARCHABLE_FILE_SUFFIXES ).anyMatch( filename::endsWith );

        String out = filename;
        if( searchSuffix ) {
            try( final InputStream attStream = mgr.getAttachmentStream( att ); final StringWriter sout = new StringWriter() ) {
                FileUtil.copyContents( new InputStreamReader( attStream ), sout );
                out = out + " " + sout;
            } catch( final ProviderException | IOException e ) {
                LOG.error( "Attachment cannot be loaded", e );
            }
        }

        return out;
    }

    /**
     * Updates the lucene index for a single page: the old document is removed first, then the
     * new version is added.
     *
     * @param page The WikiPage to check
     * @param text The page text to index.
     */
    protected synchronized void updateLuceneIndex( final Page page, final String text ) {
        LOG.debug( "Updating Lucene index for page '{}'...", page.getName() );
        pageRemoved( page );

        // Now add back the new version.
        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            luceneIndexPage( page, text, writer );
        } catch( final IOException e ) {
            LOG.error( "Unable to update page '{}' from Lucene index", page.getName(), e );
            // reindexPage( page );
        } catch( final Exception e ) {
            LOG.error( "Unexpected Lucene exception - please check configuration!", e );
            // reindexPage( page );
        }

        LOG.debug( "Done updating Lucene index for page '{}'.", page.getName() );
    }

    /**
     * Instantiates the configured Lucene {@link Analyzer} ({@code jspwiki.lucene.analyzer} property).
     *
     * @return a fresh Analyzer instance
     * @throws ProviderException if the analyzer class cannot be constructed
     */
    private Analyzer getLuceneAnalyzer() throws ProviderException {
        try {
            return ClassUtil.buildInstance( m_analyzerClass );
        } catch( final Exception e ) {
            final String msg = "Could not get LuceneAnalyzer class " + m_analyzerClass + ", reason: ";
            LOG.error( msg, e );
            throw new ProviderException( msg + e );
        }
    }

    /**
     * Indexes page using the given IndexWriter.
     *
     * @param page WikiPage
     * @param text Page text to index
     * @param writer The Lucene IndexWriter to use for indexing
     * @return the created index Document
     * @throws IOException If there's an indexing problem
     */
    protected Document luceneIndexPage( final Page page, final String text, final IndexWriter writer ) throws IOException {
        LOG.debug( "Indexing {}...", page.getName() );

        // make a new, empty document
        final Document doc = new Document();
        if( text == null ) {
            return doc;
        }

        final String indexedText = text.replace( "__", " " ); // be nice to Language Analyzers - cfr. JSPWIKI-893

        // Raw name is the keyword we'll use to refer to this document for updates.
        Field field = new Field( LUCENE_ID, page.getName(), StringField.TYPE_STORED );
        doc.add( field );

        // Body text.  It is stored in the doc for search contexts.
        field = new Field( LUCENE_PAGE_CONTENTS, indexedText, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by page name. Both beautified and raw
        final String unTokenizedTitle = StringUtils.replaceChars( page.getName(), TextUtil.PUNCTUATION_CHARS_ALLOWED, PUNCTUATION_TO_SPACES );
        field = new Field( LUCENE_PAGE_NAME, TextUtil.beautifyString( page.getName() ) + " " + unTokenizedTitle, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by authorname
        if( page.getAuthor() != null ) {
            field = new Field( LUCENE_AUTHOR, page.getAuthor(), TextField.TYPE_STORED );
            doc.add( field );
        }

        // Now add the names of the attachments of this page
        try {
            final List< Attachment > attachments = m_engine.getManager( AttachmentManager.class ).listAttachments( page );
            final String attachmentNames = attachments.stream().map( att -> att.getName() + ";" ).collect( Collectors.joining() );

            field = new Field( LUCENE_ATTACHMENTS, attachmentNames, TextField.TYPE_STORED );
            doc.add( field );

        } catch( final ProviderException e ) {
            // Unable to read attachments
            LOG.error( "Failed to get attachments for page", e );
        }

        // also index page keywords, if available
        if( page.getAttribute( "keywords" ) != null ) {
            field = new Field( LUCENE_PAGE_KEYWORDS, page.getAttribute( "keywords" ).toString(), TextField.TYPE_STORED );
            doc.add( field );
        }
        synchronized( writer ) {
            writer.addDocument( doc );
        }

        return doc;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public synchronized void pageRemoved( final Page page ) {
        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            final Query query = new TermQuery( new Term( LUCENE_ID, page.getName() ) );
            writer.deleteDocuments( query );
        } catch( final Exception e ) {
            LOG.error( "Unable to remove page '{}' from Lucene index", page.getName(), e );
        }
    }

    /**
     * Creates an IndexWriter on the given directory, using the configured analyzer and
     * CREATE_OR_APPEND mode. Caller is responsible for closing the writer.
     */
    IndexWriter getIndexWriter( final Directory luceneDir ) throws IOException, ProviderException {
        final IndexWriterConfig writerConfig = new IndexWriterConfig( getLuceneAnalyzer() );
        writerConfig.setOpenMode( OpenMode.CREATE_OR_APPEND );
        return new IndexWriter( luceneDir, writerConfig );
    }

    /**
     * Adds a page-text pair to the lucene update queue. Safe to call always
     *
     * @param page WikiPage to add to the update queue.
     */
    @Override
    public void reindexPage( final Page page ) {
        if( page != null ) {
            final String text;

            // TODO: Think if this was better done in the thread itself?
            if( page instanceof Attachment ) {
                text = getAttachmentContent( ( Attachment )page );
            } else {
                text = m_engine.getManager( PageManager.class ).getPureText( page );
            }

            if( text != null ) {
                // Add work item to m_updates queue.
                final Object[] pair = new Object[ 2 ];
                pair[ 0 ] = page;
                pair[ 1 ] = text;
                m_updates.add( pair );
                LOG.debug( "Scheduling page {} for index update", page.getName() );
            }
        }
    }

    /** {@inheritDoc} */
    @Override
    public Collection< SearchResult > findPages( final String query, final Context wikiContext ) throws ProviderException {
        return findPages( query, FLAG_CONTEXTS, wikiContext );
    }

    /** Create contexts also.  Generating contexts can be expensive, so they're not on by default. */
    public static final int FLAG_CONTEXTS = 0x01;

    /**
     * Searches pages using a particular combination of flags.
     *
     * @param query The query to perform in Lucene query language
     * @param flags A set of flags
     * @return A Collection of SearchResult instances
     * @throws ProviderException if there is a problem with the backend
     */
    public Collection< SearchResult > findPages( final String query, final int flags, final Context wikiContext ) throws ProviderException {
        ArrayList< SearchResult > list = null;
        Highlighter highlighter = null;

        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexReader reader = DirectoryReader.open( luceneDir ) ) {
            final String[] queryfields = { LUCENE_PAGE_CONTENTS, LUCENE_PAGE_NAME, LUCENE_AUTHOR, LUCENE_ATTACHMENTS, LUCENE_PAGE_KEYWORDS };
            final QueryParser qp = new MultiFieldQueryParser( queryfields, getLuceneAnalyzer() );
            final Query luceneQuery = qp.parse( query );
            final IndexSearcher searcher = new IndexSearcher( reader, searchExecutor );

            if( ( flags & FLAG_CONTEXTS ) != 0 ) {
                highlighter = new Highlighter( new SimpleHTMLFormatter( "<span class=\"searchmatch\">", "</span>" ),
                                               new SimpleHTMLEncoder(),
                                               new QueryScorer( luceneQuery ) );
            }

            final ScoreDoc[] hits = searcher.search( luceneQuery, MAX_SEARCH_HITS ).scoreDocs;
            final AuthorizationManager mgr = m_engine.getManager( AuthorizationManager.class );

            list = new ArrayList<>( hits.length );
            for( final ScoreDoc hit : hits ) {
                final int docID = hit.doc;
                final Document doc = searcher.doc( docID );
                final String pageName = doc.get( LUCENE_ID );
                final Page page = m_engine.getManager( PageManager.class ).getPage( pageName, PageProvider.LATEST_VERSION );

                if( page != null ) {
                    final PagePermission pp = new PagePermission( page, PagePermission.VIEW_ACTION );
                    if( mgr.checkPermission( wikiContext.getWikiSession(), pp ) ) {
                        final int score = ( int )( hit.score * 100 );

                        // Get highlighted search contexts
                        final String text = doc.get( LUCENE_PAGE_CONTENTS );

                        String[] fragments = new String[ 0 ];
                        if( text != null && highlighter != null ) {
                            final TokenStream tokenStream = getLuceneAnalyzer().tokenStream( LUCENE_PAGE_CONTENTS, new StringReader( text ) );
                            fragments = highlighter.getBestFragments( tokenStream, text, MAX_FRAGMENTS );
                        }

                        final SearchResult result = new SearchResultImpl( page, score, fragments );
                        list.add( result );
                    }
                } else {
                    LOG.error( "Lucene found a result page '{}' that could not be loaded, removing from Lucene cache", pageName );
                    pageRemoved( Wiki.contents().page( m_engine, pageName ) );
                }
            }
        } catch( final IOException e ) {
            LOG.error( "Failed during lucene search", e );
        } catch( final ParseException e ) {
            LOG.error( "Broken query; cannot parse query: {}", query, e );
            throw new ProviderException( "You have entered a query Lucene cannot process [" + query + "]: " + e.getMessage() );
        } catch( final InvalidTokenOffsetsException e ) {
            LOG.error( "Tokens are incompatible with provided text ", e );
        }

        return list;
    }

    /** {@inheritDoc} */
    @Override
    public String getProviderInfo() {
        return "LuceneSearchProvider";
    }

    /**
     * Updater thread that updates Lucene indexes.
     */
    private static final class LuceneUpdater extends WikiBackgroundThread {
        static final int INDEX_DELAY = 5;
        static final int INITIAL_DELAY = 60;
        private final LuceneSearchProvider m_provider;

        private final int m_initialDelay;

        private WatchDog m_watchdog;

        private LuceneUpdater( final Engine engine, final LuceneSearchProvider provider, final int initialDelay, final int indexDelay ) {
            super( engine, indexDelay );
            m_provider = provider;
            m_initialDelay = initialDelay;
            setName( "JSPWiki Lucene Indexer" );
        }

        @Override
        public void startupTask() throws Exception {
            m_watchdog = WatchDog.getCurrentWatchDog( getEngine() );

            // Sleep initially...
            try {
                Thread.sleep( m_initialDelay * 1000L );
            } catch( final InterruptedException e ) {
                throw new InternalWikiException( "Interrupted while waiting to start.", e );
            }

            m_watchdog.enterState( "Full reindex" );
            // Reindex everything
            m_provider.doFullLuceneReindex();
            m_watchdog.exitState();
        }

        @Override
        public void backgroundTask() {
            m_watchdog.enterState( "Emptying index queue", 60 );

            synchronized( m_provider.m_updates ) {
                while( m_provider.m_updates.size() > 0 ) {
                    final Object[] pair = m_provider.m_updates.remove( 0 );
                    final Page page = ( Page )pair[ 0 ];
                    final String text = ( String )pair[ 1 ];
                    m_provider.updateLuceneIndex( page, text );
                }
            }

            m_watchdog.exitState();
        }

    }

    // FIXME: This class is dumb; needs to have a better implementation
    private static class SearchResultImpl implements SearchResult {

        private final Page m_page;
        private final int m_score;
        private final String[] m_contexts;

        public SearchResultImpl( final Page page, final int score, final String[] contexts ) {
            m_page = page;
            m_score = score;
            // Defensive copy so callers can't mutate our stored contexts.
            m_contexts = contexts != null ? contexts.clone() : null;
        }

        @Override
        public Page getPage() {
            return m_page;
        }

        /* (non-Javadoc)
         * @see org.apache.wiki.SearchResult#getScore()
         */
        @Override
        public int getScore() {
            return m_score;
        }


        @Override
        public String[] getContexts() {
            return m_contexts;
        }
    }

}