/*
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements.  See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership.  The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied.  See the License for the
    specific language governing permissions and limitations
    under the License.
 */
package org.apache.wiki.search;

import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.wiki.InternalWikiException;
import org.apache.wiki.WatchDog;
import org.apache.wiki.WikiBackgroundThread;
import org.apache.wiki.api.core.Attachment;
import org.apache.wiki.api.core.Context;
import org.apache.wiki.api.core.Engine;
import org.apache.wiki.api.core.Page;
import org.apache.wiki.api.exceptions.NoRequiredPropertyException;
import org.apache.wiki.api.exceptions.ProviderException;
import org.apache.wiki.api.providers.PageProvider;
import org.apache.wiki.api.providers.WikiProvider;
import org.apache.wiki.api.search.SearchResult;
import org.apache.wiki.api.spi.Wiki;
import org.apache.wiki.attachment.AttachmentManager;
import org.apache.wiki.auth.AuthorizationManager;
import org.apache.wiki.auth.permissions.PagePermission;
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.util.ClassUtil;
import org.apache.wiki.util.FileUtil;
import org.apache.wiki.util.TextUtil;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;


/**
 * Lucene-based {@link SearchProvider} that indexes wiki pages and attachments and handles searching the wiki.
 *
 * @since 2.2.21.
 */
public class LuceneSearchProvider implements SearchProvider {

    protected static final Logger log = LogManager.getLogger( LuceneSearchProvider.class );

    private Engine m_engine;
    private Executor searchExecutor;

    // Lucene properties.

    /** Which analyzer to use. Default is {@code org.apache.lucene.analysis.standard.ClassicAnalyzer}. */
    public static final String PROP_LUCENE_ANALYZER = "jspwiki.lucene.analyzer";
    private static final String PROP_LUCENE_INDEXDELAY = "jspwiki.lucene.indexdelay";
    private static final String PROP_LUCENE_INITIALDELAY = "jspwiki.lucene.initialdelay";

    private String m_analyzerClass = "org.apache.lucene.analysis.standard.ClassicAnalyzer";

    private static final String LUCENE_DIR = "lucene";

    /** These attachment file suffixes will be indexed. */
    public static final String[] SEARCHABLE_FILE_SUFFIXES = new String[] { ".txt", ".ini", ".xml", ".html", ".htm", ".mm",
                                                                           ".xhtml", ".java", ".c", ".cpp", ".php", ".asm", ".sh",
                                                                           ".properties", ".kml", ".gpx", ".loc", ".md" };

    protected static final String LUCENE_ID = "id";
    protected static final String LUCENE_PAGE_CONTENTS = "contents";
    protected static final String LUCENE_AUTHOR = "author";
    protected static final String LUCENE_ATTACHMENTS = "attachment";
    protected static final String LUCENE_PAGE_NAME = "name";
    protected static final String LUCENE_PAGE_KEYWORDS = "keywords";

    private String m_luceneDirectory;

    /** Queue of { Page, text } pairs waiting to be indexed by the background updater thread. */
    protected final List< Object[] > m_updates = Collections.synchronizedList( new ArrayList<>() );

    /** Maximum number of fragments from search matches. */
    private static final int MAX_FRAGMENTS = 3;

    /** The maximum number of hits to return from searches. */
    public static final int MAX_SEARCH_HITS = 99_999;

    private static final String PUNCTUATION_TO_SPACES = StringUtils.repeat( " ", TextUtil.PUNCTUATION_CHARS_ALLOWED.length() );

    /**
     * {@inheritDoc}
     */
    @Override
    public void initialize( final Engine engine, final Properties props ) throws NoRequiredPropertyException, IOException {
        m_engine = engine;
        searchExecutor = Executors.newCachedThreadPool();

        m_luceneDirectory = engine.getWorkDir() + File.separator + LUCENE_DIR;

        final int initialDelay = TextUtil.getIntegerProperty( props, PROP_LUCENE_INITIALDELAY, LuceneUpdater.INITIAL_DELAY );
        final int indexDelay = TextUtil.getIntegerProperty( props, PROP_LUCENE_INDEXDELAY, LuceneUpdater.INDEX_DELAY );

        m_analyzerClass = TextUtil.getStringProperty( props, PROP_LUCENE_ANALYZER, m_analyzerClass );

        // FIXME: To keep things simple for now, a full reindex is done only when the Lucene directory contains no files.
        final File dir = new File( m_luceneDirectory );
        log.info( "Lucene enabled, cache will be in: " + dir.getAbsolutePath() );
        try {
            if( !dir.exists() ) {
                dir.mkdirs();
            }

            if( !dir.exists() || !dir.canWrite() || !dir.canRead() ) {
                log.error( "Cannot write to Lucene directory, disabling Lucene: " + dir.getAbsolutePath() );
                throw new IOException( "Invalid Lucene directory." );
            }

            final String[] filelist = dir.list();
            if( filelist == null ) {
                throw new IOException( "Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath() );
            }
        } catch( final IOException e ) {
            log.error( "Problem while creating Lucene index - not using Lucene.", e );
        }

        // Start the Lucene update thread, which first waits for a little while before starting
        // to work through the queue of pages that need updating.
        final LuceneUpdater updater = new LuceneUpdater( m_engine, this, initialDelay, indexDelay );
        updater.start();
    }

    /**
     * Returns the handling engine.
     *
     * @return Current Engine
     */
    protected Engine getEngine() {
        return m_engine;
    }

    /**
     * Performs a full Lucene reindex, if necessary.
     *
     * @throws IOException If there's a problem during indexing
     */
    protected void doFullLuceneReindex() throws IOException {
        final File dir = new File( m_luceneDirectory );
        final String[] filelist = dir.list();
        if( filelist == null ) {
            throw new IOException( "Invalid Lucene directory: cannot produce listing: " + dir.getAbsolutePath() );
        }

        try {
            if( filelist.length == 0 ) {
                // No files? Reindex!
                final Date start = new Date();

                log.info( "Starting Lucene reindexing, this can take a couple of minutes..." );

                final Directory luceneDir = new NIOFSDirectory( dir.toPath() );
                try( final IndexWriter writer = getIndexWriter( luceneDir ) ) {
                    final Collection< Page > allPages = m_engine.getManager( PageManager.class ).getAllPages();
                    for( final Page page : allPages ) {
                        try {
                            final String text = m_engine.getManager( PageManager.class ).getPageText( page.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( page, text, writer );
                        } catch( final IOException e ) {
                            log.warn( "Unable to index page " + page.getName() + ", continuing to next", e );
                        }
                    }

                    final Collection< Attachment > allAttachments = m_engine.getManager( AttachmentManager.class ).getAllAttachments();
                    for( final Attachment att : allAttachments ) {
                        try {
                            final String text = getAttachmentContent( att.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( att, text, writer );
                        } catch( final IOException e ) {
                            log.warn( "Unable to index attachment " + att.getName() + ", continuing to next", e );
                        }
                    }
                }

                final Date end = new Date();
                log.info( "Full Lucene index finished in " + ( end.getTime() - start.getTime() ) + " milliseconds." );
            } else {
                log.info( "Files found in Lucene directory, not reindexing." );
            }
        } catch( final IOException e ) {
            log.error( "Problem while creating Lucene index - not using Lucene.", e );
        } catch( final ProviderException e ) {
            log.error( "Problem reading pages while creating Lucene index (JSPWiki won't start.)", e );
            throw new IllegalArgumentException( "unable to create Lucene index" );
        } catch( final Exception e ) {
            log.error( "Unable to start Lucene", e );
        }
    }

    /**
     * Fetches the attachment content from the repository.
     * Content is flat text that can be used for indexing/searching or display.
     *
     * @param attachmentName Name of the attachment.
     * @param version The version of the attachment.
     *
     * @return the content of the Attachment as a String.
     */
    protected String getAttachmentContent( final String attachmentName, final int version ) {
        final AttachmentManager mgr = m_engine.getManager( AttachmentManager.class );
        try {
            final Attachment att = mgr.getAttachmentInfo( attachmentName, version );
            // FIXME: Find out why sometimes att is null
            if( att != null ) {
                return getAttachmentContent( att );
            }
        } catch( final ProviderException e ) {
            log.error( "Attachment cannot be loaded", e );
        }
        return null;
    }

    /**
     * @param att Attachment to get content for. The filename extension is used to determine the type of the attachment.
     * @return String representing the content of the file.
     * FIXME This is a very simple implementation for text-based attachments, mainly used for testing.
     *       It should be replaced/moved to Attachment search providers or some other 'pluggable' way to search attachments.
     */
    protected String getAttachmentContent( final Attachment att ) {
        final AttachmentManager mgr = m_engine.getManager( AttachmentManager.class );
        // FIXME: Add attachment plugin structure

        final String filename = att.getFileName();

        boolean searchSuffix = false;
        for( final String suffix : SEARCHABLE_FILE_SUFFIXES ) {
            if( filename.endsWith( suffix ) ) {
                searchSuffix = true;
                break;
            }
        }

        String out = filename;
        if( searchSuffix ) {
            try( final InputStream attStream = mgr.getAttachmentStream( att ); final StringWriter sout = new StringWriter() ) {
                FileUtil.copyContents( new InputStreamReader( attStream ), sout );
                out = out + " " + sout;
            } catch( final ProviderException | IOException e ) {
                log.error( "Attachment cannot be loaded", e );
            }
        }

        return out;
    }

    /**
     * Updates the Lucene index for a single page.
     *
     * @param page The WikiPage to check
     * @param text The page text to index.
     */
    protected synchronized void updateLuceneIndex( final Page page, final String text ) {
        log.debug( "Updating Lucene index for page '" + page.getName() + "'..." );
        pageRemoved( page );

        // Now add back the new version.
        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            luceneIndexPage( page, text, writer );
        } catch( final IOException e ) {
            log.error( "Unable to update page '" + page.getName() + "' in Lucene index", e );
            // reindexPage( page );
        } catch( final Exception e ) {
            log.error( "Unexpected Lucene exception - please check configuration!", e );
            // reindexPage( page );
        }

        log.debug( "Done updating Lucene index for page '" + page.getName() + "'." );
    }

    private Analyzer getLuceneAnalyzer() throws ProviderException {
        try {
            return ClassUtil.buildInstance( m_analyzerClass );
        } catch( final Exception e ) {
            final String msg = "Could not get LuceneAnalyzer class " + m_analyzerClass + ", reason: ";
            log.error( msg, e );
            throw new ProviderException( msg + e );
        }
    }

    /**
     * Indexes a page using the given IndexWriter.
     *
     * @param page WikiPage
     * @param text Page text to index
     * @param writer The Lucene IndexWriter to use for indexing
     * @return the created index Document
     * @throws IOException If there's an indexing problem
     */
    protected Document luceneIndexPage( final Page page, final String text, final IndexWriter writer ) throws IOException {
        log.debug( "Indexing {}...", page.getName() );

        // Make a new, empty document.
        final Document doc = new Document();
        if( text == null ) {
            return doc;
        }

        final String indexedText = text.replace( "__", " " ); // be nice to Language Analyzers - cfr. JSPWIKI-893

        // Raw name is the keyword we'll use to refer to this document for updates.
        Field field = new Field( LUCENE_ID, page.getName(), StringField.TYPE_STORED );
        doc.add( field );

        // Body text. It is stored in the doc for search contexts.
        field = new Field( LUCENE_PAGE_CONTENTS, indexedText, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by page name, both beautified and raw.
        final String unTokenizedTitle = StringUtils.replaceChars( page.getName(), TextUtil.PUNCTUATION_CHARS_ALLOWED, PUNCTUATION_TO_SPACES );
        field = new Field( LUCENE_PAGE_NAME, TextUtil.beautifyString( page.getName() ) + " " + unTokenizedTitle, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by author name.
        if( page.getAuthor() != null ) {
            field = new Field( LUCENE_AUTHOR, page.getAuthor(), TextField.TYPE_STORED );
            doc.add( field );
        }

        // Now add the names of the attachments of this page.
        try {
            final List< Attachment > attachments = m_engine.getManager( AttachmentManager.class ).listAttachments( page );
            final StringBuilder attachmentNames = new StringBuilder();

            for( final Attachment att : attachments ) {
                attachmentNames.append( att.getName() ).append( ";" );
            }
            field = new Field( LUCENE_ATTACHMENTS, attachmentNames.toString(), TextField.TYPE_STORED );
            doc.add( field );

        } catch( final ProviderException e ) {
            // Unable to read attachments
            log.error( "Failed to get attachments for page", e );
        }

        // Also index page keywords, if available.
        if( page.getAttribute( "keywords" ) != null ) {
            field = new Field( LUCENE_PAGE_KEYWORDS, page.getAttribute( "keywords" ).toString(), TextField.TYPE_STORED );
            doc.add( field );
        }

        synchronized( writer ) {
            writer.addDocument( doc );
        }

        return doc;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public synchronized void pageRemoved( final Page page ) {
        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            final Query query = new TermQuery( new Term( LUCENE_ID, page.getName() ) );
            writer.deleteDocuments( query );
        } catch( final Exception e ) {
            log.error( "Unable to remove page '" + page.getName() + "' from Lucene index", e );
        }
    }

    IndexWriter getIndexWriter( final Directory luceneDir ) throws IOException, ProviderException {
        final IndexWriterConfig writerConfig = new IndexWriterConfig( getLuceneAnalyzer() );
        writerConfig.setOpenMode( OpenMode.CREATE_OR_APPEND );
        return new IndexWriter( luceneDir, writerConfig );
    }

    /**
     * Adds a page-text pair to the Lucene update queue. Always safe to call.
     *
     * @param page WikiPage to add to the update queue.
     */
    @Override
    public void reindexPage( final Page page ) {
        if( page != null ) {
            final String text;

            // TODO: Would this be better done in the indexer thread itself?
            if( page instanceof Attachment ) {
                text = getAttachmentContent( ( Attachment )page );
            } else {
                text = m_engine.getManager( PageManager.class ).getPureText( page );
            }

            if( text != null ) {
                // Add work item to the m_updates queue.
                final Object[] pair = new Object[ 2 ];
                pair[ 0 ] = page;
                pair[ 1 ] = text;
                m_updates.add( pair );
                log.debug( "Scheduling page " + page.getName() + " for index update" );
            }
        }
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public Collection< SearchResult > findPages( final String query, final Context wikiContext ) throws ProviderException {
        return findPages( query, FLAG_CONTEXTS, wikiContext );
    }

    /**
     * Create contexts also. Generating contexts can be expensive, so they're not on by default.
     */
    public static final int FLAG_CONTEXTS = 0x01;

    /**
     * Searches pages using a particular combination of flags.
     *
     * @param query The query to perform in Lucene query language
     * @param flags A set of flags
     * @param wikiContext The context of the search, used for page permission checks
     * @return A Collection of SearchResult instances
     * @throws ProviderException if there is a problem with the backend
     */
    public Collection< SearchResult > findPages( final String query, final int flags, final Context wikiContext ) throws ProviderException {
        ArrayList< SearchResult > list = null;
        Highlighter highlighter = null;

        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexReader reader = DirectoryReader.open( luceneDir ) ) {
            final String[] queryfields = { LUCENE_PAGE_CONTENTS, LUCENE_PAGE_NAME, LUCENE_AUTHOR, LUCENE_ATTACHMENTS, LUCENE_PAGE_KEYWORDS };
            final QueryParser qp = new MultiFieldQueryParser( queryfields, getLuceneAnalyzer() );
            final Query luceneQuery = qp.parse( query );
            final IndexSearcher searcher = new IndexSearcher( reader, searchExecutor );

            if( ( flags & FLAG_CONTEXTS ) != 0 ) {
                highlighter = new Highlighter( new SimpleHTMLFormatter( "<span class=\"searchmatch\">", "</span>" ),
                                               new SimpleHTMLEncoder(),
                                               new QueryScorer( luceneQuery ) );
            }

            final ScoreDoc[] hits = searcher.search( luceneQuery, MAX_SEARCH_HITS ).scoreDocs;
            final AuthorizationManager mgr = m_engine.getManager( AuthorizationManager.class );

            list = new ArrayList<>( hits.length );
            for( final ScoreDoc hit : hits ) {
                final int docID = hit.doc;
                final Document doc = searcher.doc( docID );
                final String pageName = doc.get( LUCENE_ID );
                final Page page = m_engine.getManager( PageManager.class ).getPage( pageName, PageProvider.LATEST_VERSION );

                if( page != null ) {
                    final PagePermission pp = new PagePermission( page, PagePermission.VIEW_ACTION );
                    if( mgr.checkPermission( wikiContext.getWikiSession(), pp ) ) {
                        final int score = ( int )( hit.score * 100 );

                        // Get highlighted search contexts.
                        final String text = doc.get( LUCENE_PAGE_CONTENTS );

                        String[] fragments = new String[ 0 ];
                        if( text != null && highlighter != null ) {
                            final TokenStream tokenStream = getLuceneAnalyzer().tokenStream( LUCENE_PAGE_CONTENTS, new StringReader( text ) );
                            fragments = highlighter.getBestFragments( tokenStream, text, MAX_FRAGMENTS );
                        }

                        final SearchResult result = new SearchResultImpl( page, score, fragments );
                        list.add( result );
                    }
                } else {
                    log.error( "Lucene found a result page '" + pageName + "' that could not be loaded, removing from Lucene cache" );
                    pageRemoved( Wiki.contents().page( m_engine, pageName ) );
                }
            }
        } catch( final IOException e ) {
            log.error( "Failed during Lucene search", e );
        } catch( final ParseException e ) {
            log.info( "Broken query; cannot parse query: " + query, e );
            throw new ProviderException( "You have entered a query Lucene cannot process [" + query + "]: " + e.getMessage() );
        } catch( final InvalidTokenOffsetsException e ) {
            log.error( "Tokens are incompatible with provided text", e );
        }

        return list;
    }

    /**
     * {@inheritDoc}
     */
    @Override
    public String getProviderInfo() {
        return "LuceneSearchProvider";
    }

    /**
     * Updater thread that updates Lucene indexes.
     */
    private static final class LuceneUpdater extends WikiBackgroundThread {

        static final int INDEX_DELAY = 5;
        static final int INITIAL_DELAY = 60;

        private final LuceneSearchProvider m_provider;
        private final int m_initialDelay;

        private WatchDog m_watchdog;

        private LuceneUpdater( final Engine engine, final LuceneSearchProvider provider, final int initialDelay, final int indexDelay ) {
            super( engine, indexDelay );
            m_provider = provider;
            m_initialDelay = initialDelay;
            setName( "JSPWiki Lucene Indexer" );
        }

        @Override
        public void startupTask() throws Exception {
            m_watchdog = WatchDog.getCurrentWatchDog( getEngine() );

            // Sleep initially...
            try {
                Thread.sleep( m_initialDelay * 1000L );
            } catch( final InterruptedException e ) {
                throw new InternalWikiException( "Interrupted while waiting to start.", e );
            }

            m_watchdog.enterState( "Full reindex" );
            // Reindex everything.
            m_provider.doFullLuceneReindex();
            m_watchdog.exitState();
        }

        @Override
        public void backgroundTask() {
            m_watchdog.enterState( "Emptying index queue", 60 );

            synchronized( m_provider.m_updates ) {
                while( m_provider.m_updates.size() > 0 ) {
                    final Object[] pair = m_provider.m_updates.remove( 0 );
                    final Page page = ( Page )pair[ 0 ];
                    final String text = ( String )pair[ 1 ];
                    m_provider.updateLuceneIndex( page, text );
                }
            }

            m_watchdog.exitState();
        }

    }

    // FIXME: This class is dumb; needs to have a better implementation
    private static class SearchResultImpl implements SearchResult {

        private final Page m_page;
        private final int m_score;
        private final String[] m_contexts;

        public SearchResultImpl( final Page page, final int score, final String[] contexts ) {
            m_page = page;
            m_score = score;
            m_contexts = contexts != null ? contexts.clone() : null;
        }

        @Override
        public Page getPage() {
            return m_page;
        }

        @Override
        public int getScore() {
            return m_score;
        }

        @Override
        public String[] getContexts() {
            return m_contexts;
        }
    }

}