/*
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements.  See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership.  The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License.  You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied.  See the License for the
    specific language governing permissions and limitations
    under the License.
 */
package org.apache.wiki.search;

import org.apache.commons.lang3.StringUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.MultiFieldQueryParser;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.highlight.Highlighter;
import org.apache.lucene.search.highlight.InvalidTokenOffsetsException;
import org.apache.lucene.search.highlight.QueryScorer;
import org.apache.lucene.search.highlight.SimpleHTMLEncoder;
import org.apache.lucene.search.highlight.SimpleHTMLFormatter;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NIOFSDirectory;
import org.apache.wiki.InternalWikiException;
import org.apache.wiki.WatchDog;
import org.apache.wiki.WikiBackgroundThread;
import org.apache.wiki.api.core.Attachment;
import org.apache.wiki.api.core.Context;
import org.apache.wiki.api.core.Engine;
import org.apache.wiki.api.core.Page;
import org.apache.wiki.api.exceptions.NoRequiredPropertyException;
import org.apache.wiki.api.exceptions.ProviderException;
import org.apache.wiki.api.providers.PageProvider;
import org.apache.wiki.api.providers.WikiProvider;
import org.apache.wiki.api.search.SearchResult;
import org.apache.wiki.api.spi.Wiki;
import org.apache.wiki.attachment.AttachmentManager;
import org.apache.wiki.auth.AuthorizationManager;
import org.apache.wiki.auth.permissions.PagePermission;
import org.apache.wiki.pages.PageManager;
import org.apache.wiki.util.ClassUtil;
import org.apache.wiki.util.FileUtil;
import org.apache.wiki.util.TextUtil;

import java.io.*;
import java.lang.reflect.Constructor;
import java.util.*;
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

/**
 *  Lucene-based search provider that performs full-text indexing and searching of wiki pages and attachments.
 *
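 *  <p>Lucene behaviour can be tuned from {@code jspwiki.properties}. A minimal configuration sketch, shown with the
 *  built-in defaults of this class (any deployment may override these values):</p>
 *  <pre>
 *  jspwiki.lucene.analyzer     = org.apache.lucene.analysis.standard.ClassicAnalyzer
 *  jspwiki.lucene.initialdelay = 60
 *  jspwiki.lucene.indexdelay   = 5
 *  </pre>
 *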
 *  @since 2.2.21.
 */
public class LuceneSearchProvider implements SearchProvider {

    protected static final Logger log = LogManager.getLogger(LuceneSearchProvider.class);

    private Engine m_engine;
    private Executor searchExecutor;

    // Lucene properties.

    /** Which analyzer to use.  Default is ClassicAnalyzer. */
    public static final String PROP_LUCENE_ANALYZER      = "jspwiki.lucene.analyzer";
    private static final String PROP_LUCENE_INDEXDELAY   = "jspwiki.lucene.indexdelay";
    private static final String PROP_LUCENE_INITIALDELAY = "jspwiki.lucene.initialdelay";

    private String m_analyzerClass = "org.apache.lucene.analysis.standard.ClassicAnalyzer";

    private static final String LUCENE_DIR = "lucene";

    /** These attachment file suffixes will be indexed. */
    public static final String[] SEARCHABLE_FILE_SUFFIXES = new String[] { ".txt", ".ini", ".xml", ".html", ".htm", ".mm",
                                                                           ".xhtml", ".java", ".c", ".cpp", ".php", ".asm", ".sh",
                                                                           ".properties", ".kml", ".gpx", ".loc", ".md" };

    protected static final String LUCENE_ID            = "id";
    protected static final String LUCENE_PAGE_CONTENTS = "contents";
    protected static final String LUCENE_AUTHOR        = "author";
    protected static final String LUCENE_ATTACHMENTS   = "attachment";
    protected static final String LUCENE_PAGE_NAME     = "name";
    protected static final String LUCENE_PAGE_KEYWORDS = "keywords";

    private String m_luceneDirectory;
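    /** Queue of { Page, page text } pairs waiting to be written to the index by the background updater thread. */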
    protected final List< Object[] > m_updates = Collections.synchronizedList( new ArrayList<>() );

    /** Maximum number of fragments from search matches. */
    private static final int MAX_FRAGMENTS = 3;

    /** The maximum number of hits to return from searches. */
    public static final int MAX_SEARCH_HITS = 99_999;

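    /** A run of spaces as long as {@link TextUtil#PUNCTUATION_CHARS_ALLOWED}, used to blank out punctuation from page names when indexing titles. */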
    private static final String PUNCTUATION_TO_SPACES = StringUtils.repeat(" ", TextUtil.PUNCTUATION_CHARS_ALLOWED.length() );

    /**
     *  {@inheritDoc}
     */
    @Override
    public void initialize( final Engine engine, final Properties props ) throws NoRequiredPropertyException, IOException  {
        m_engine = engine;
        searchExecutor = Executors.newCachedThreadPool();

        m_luceneDirectory = engine.getWorkDir()+File.separator+LUCENE_DIR;

        final int initialDelay = TextUtil.getIntegerProperty( props, PROP_LUCENE_INITIALDELAY, LuceneUpdater.INITIAL_DELAY );
        final int indexDelay   = TextUtil.getIntegerProperty( props, PROP_LUCENE_INDEXDELAY, LuceneUpdater.INDEX_DELAY );

        m_analyzerClass = TextUtil.getStringProperty( props, PROP_LUCENE_ANALYZER, m_analyzerClass );
        // FIXME: Just to be simple for now, we will do full reindex only if no files are in lucene directory.

        final File dir = new File(m_luceneDirectory);
        log.info("Lucene enabled, cache will be in: "+dir.getAbsolutePath());
        try {
            if( !dir.exists() ) {
                dir.mkdirs();
            }

            if( !dir.exists() || !dir.canWrite() || !dir.canRead() ) {
                log.error("Cannot write to Lucene directory, disabling Lucene: "+dir.getAbsolutePath());
                throw new IOException( "Invalid Lucene directory." );
            }

            final String[] filelist = dir.list();
            if( filelist == null ) {
                throw new IOException( "Invalid Lucene directory: cannot produce listing: "+dir.getAbsolutePath());
            }
        } catch( final IOException e ) {
            log.error("Problem while creating Lucene index - not using Lucene.", e);
        }

        // Start the Lucene update thread, which waits first
        // for a little while before starting to go through
        // the Lucene "pages that need updating".
        final LuceneUpdater updater = new LuceneUpdater( m_engine, this, initialDelay, indexDelay );
        updater.start();
    }

    /**
     *  Returns the handling engine.
     *
     *  @return Current Engine
     */
    protected Engine getEngine()
    {
        return m_engine;
    }

    /**
     *  Performs a full Lucene reindex, if necessary.
     *
     *  @throws IOException If there's a problem during indexing
     */
    protected void doFullLuceneReindex() throws IOException {
        final File dir = new File(m_luceneDirectory);
        final String[] filelist = dir.list();
        if( filelist == null ) {
            throw new IOException( "Invalid Lucene directory: cannot produce listing: "+dir.getAbsolutePath());
        }

        try {
            if( filelist.length == 0 ) {
                //
                //  No files? Reindex!
                //
                final Date start = new Date();

                log.info("Starting Lucene reindexing, this can take a couple of minutes...");

                final Directory luceneDir = new NIOFSDirectory( dir.toPath() );
                try( final IndexWriter writer = getIndexWriter( luceneDir ) ) {
                    final Collection< Page > allPages = m_engine.getManager( PageManager.class ).getAllPages();
                    for( final Page page : allPages ) {
                        try {
                            final String text = m_engine.getManager( PageManager.class ).getPageText( page.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( page, text, writer );
                        } catch( final IOException e ) {
                            log.warn( "Unable to index page " + page.getName() + ", continuing to next", e );
                        }
                    }

                    final Collection< Attachment > allAttachments = m_engine.getManager( AttachmentManager.class ).getAllAttachments();
                    for( final Attachment att : allAttachments ) {
                        try {
                            final String text = getAttachmentContent( att.getName(), WikiProvider.LATEST_VERSION );
                            luceneIndexPage( att, text, writer );
                        } catch( final IOException e ) {
                            log.warn( "Unable to index attachment " + att.getName() + ", continuing to next", e );
                        }
                    }

                }

                final Date end = new Date();
                log.info( "Full Lucene index finished in " + (end.getTime() - start.getTime()) + " milliseconds." );
            } else {
                log.info("Files found in Lucene directory, not reindexing.");
            }
        } catch ( final IOException e ) {
            log.error("Problem while creating Lucene index - not using Lucene.", e);
        } catch ( final ProviderException e ) {
            log.error("Problem reading pages while creating Lucene index (JSPWiki won't start).", e);
            throw new IllegalArgumentException("Unable to create Lucene index");
        } catch( final Exception e ) {
            log.error("Unable to start Lucene", e);
        }

    }

    /**
     *  Fetches the attachment content from the repository.
     *  Content is flat text that can be used for indexing/searching or display.
     *
     *  @param attachmentName Name of the attachment.
     *  @param version The version of the attachment.
     *
     *  @return the content of the Attachment as a String, or {@code null} if it cannot be loaded.
     */
    protected String getAttachmentContent( final String attachmentName, final int version ) {
        final AttachmentManager mgr = m_engine.getManager( AttachmentManager.class );
        try {
            final Attachment att = mgr.getAttachmentInfo( attachmentName, version );
            //FIXME: Find out why sometimes att is null
            if( att != null ) {
                return getAttachmentContent( att );
            }
        } catch( final ProviderException e ) {
            log.error("Attachment cannot be loaded", e);
        }
        return null;
    }

    /**
     * @param att Attachment to get content for. Filename extension is used to determine the type of the attachment.
     * @return String representing the content of the file.
     * FIXME: This is a very simple implementation for text-based attachments, mainly used for testing.
     * It should be replaced by, or moved to, attachment search providers or some other pluggable way to search attachments.
     */
    protected String getAttachmentContent( final Attachment att ) {
        final AttachmentManager mgr = m_engine.getManager( AttachmentManager.class );
        //FIXME: Add attachment plugin structure

        final String filename = att.getFileName();

        boolean searchSuffix = false;
        for( final String suffix : SEARCHABLE_FILE_SUFFIXES ) {
            if( filename.endsWith( suffix ) ) {
                searchSuffix = true;
                break;
            }
        }

        String out = filename;
        if( searchSuffix ) {
            try( final InputStream attStream = mgr.getAttachmentStream( att ); final StringWriter sout = new StringWriter() ) {
                FileUtil.copyContents( new InputStreamReader( attStream ), sout );
                out = out + " " + sout;
            } catch( final ProviderException | IOException e ) {
                log.error("Attachment cannot be loaded", e);
            }
        }

        return out;
    }

    /**
     *  Updates the Lucene index for a single page.
     *
     *  @param page The WikiPage to update in the index.
     *  @param text The page text to index.
     */
    protected synchronized void updateLuceneIndex( final Page page, final String text ) {
        log.debug("Updating Lucene index for page '" + page.getName() + "'...");
        pageRemoved( page );

        // Now add back the new version.
        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            luceneIndexPage( page, text, writer );
        } catch( final IOException e ) {
            log.error("Unable to update page '" + page.getName() + "' in the Lucene index", e);
            // reindexPage( page );
        } catch( final Exception e ) {
            log.error("Unexpected Lucene exception - please check configuration!",e);
            // reindexPage( page );
        }

        log.debug("Done updating Lucene index for page '" + page.getName() + "'.");
    }

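    /**
     *  Instantiates the configured {@link Analyzer} class via reflection.
     *
     *  @return a new Analyzer instance
     *  @throws ProviderException if the configured analyzer class cannot be found or instantiated
     */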
    private Analyzer getLuceneAnalyzer() throws ProviderException {
        try {
            final Class< ? > clazz = ClassUtil.findClass( "", m_analyzerClass );
            final Constructor< ? > constructor = clazz.getConstructor();
            return ( Analyzer )constructor.newInstance();
        } catch( final Exception e ) {
            final String msg = "Could not get LuceneAnalyzer class " + m_analyzerClass + ", reason: ";
            log.error( msg, e );
            throw new ProviderException( msg + e );
        }
    }

    /**
     *  Indexes a page using the given IndexWriter.
     *
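     *  <p>The returned document carries the {@code id}, {@code contents}, {@code name}, {@code author},
     *  {@code attachment} and {@code keywords} fields defined by the {@code LUCENE_*} constants above.</p>
     *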
     *  @param page WikiPage
     *  @param text Page text to index
     *  @param writer The Lucene IndexWriter to use for indexing
     *  @return the created index Document
     *  @throws IOException If there's an indexing problem
     */
    protected Document luceneIndexPage( final Page page, final String text, final IndexWriter writer ) throws IOException {
        if( log.isDebugEnabled() ) {
            log.debug( "Indexing " + page.getName() + "..." );
        }

        // make a new, empty document
        final Document doc = new Document();

        if( text == null ) {
            return doc;
        }
        final String indexedText = text.replace( "__", " " ); // be nice to Language Analyzers - cfr. JSPWIKI-893

        // Raw name is the keyword we'll use to refer to this document for updates.
        Field field = new Field( LUCENE_ID, page.getName(), StringField.TYPE_STORED );
        doc.add( field );

        // Body text.  It is stored in the doc for search contexts.
        field = new Field( LUCENE_PAGE_CONTENTS, indexedText, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by page name. Both beautified and raw
        final String unTokenizedTitle = StringUtils.replaceChars( page.getName(), TextUtil.PUNCTUATION_CHARS_ALLOWED, PUNCTUATION_TO_SPACES );
        field = new Field( LUCENE_PAGE_NAME, TextUtil.beautifyString( page.getName() ) + " " + unTokenizedTitle, TextField.TYPE_STORED );
        doc.add( field );

        // Allow searching by author name
        if( page.getAuthor() != null ) {
            field = new Field( LUCENE_AUTHOR, page.getAuthor(), TextField.TYPE_STORED );
            doc.add( field );
        }

        // Now add the names of the attachments of this page
        try {
            final List< Attachment > attachments = m_engine.getManager( AttachmentManager.class ).listAttachments( page );
            final StringBuilder attachmentNames = new StringBuilder();

            for( final Attachment att : attachments ) {
                attachmentNames.append( att.getName() ).append( ";" );
            }
            field = new Field( LUCENE_ATTACHMENTS, attachmentNames.toString(), TextField.TYPE_STORED );
            doc.add( field );

        } catch( final ProviderException e ) {
            // Unable to read attachments
            log.error( "Failed to get attachments for page", e );
        }

        // also index page keywords, if available
        if( page.getAttribute( "keywords" ) != null ) {
            field = new Field( LUCENE_PAGE_KEYWORDS, page.getAttribute( "keywords" ).toString(), TextField.TYPE_STORED );
            doc.add( field );
        }
        synchronized( writer ) {
            writer.addDocument(doc);
        }

        return doc;
    }

    /**
     *  {@inheritDoc}
     */
    @Override
    public synchronized void pageRemoved( final Page page ) {
        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexWriter writer = getIndexWriter( luceneDir ) ) {
            final Query query = new TermQuery( new Term( LUCENE_ID, page.getName() ) );
            writer.deleteDocuments( query );
        } catch ( final Exception e ) {
            log.error("Unable to remove page '" + page.getName() + "' from Lucene index", e);
        }
    }

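    /**
     *  Creates an {@link IndexWriter} for the given directory, using the configured analyzer and the
     *  {@code CREATE_OR_APPEND} open mode. Callers are responsible for closing the returned writer.
     */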
    IndexWriter getIndexWriter( final Directory luceneDir ) throws IOException, ProviderException {
        final IndexWriterConfig writerConfig = new IndexWriterConfig( getLuceneAnalyzer() );
        writerConfig.setOpenMode( OpenMode.CREATE_OR_APPEND );
        return new IndexWriter( luceneDir, writerConfig );
    }

    /**
     *  Adds a page/text pair to the Lucene update queue.  Safe to call at any time; the actual indexing
     *  is done later by the background updater thread.
     *
     *  @param page WikiPage to add to the update queue.
     */
    @Override
    public void reindexPage( final Page page ) {
        if( page != null ) {
            final String text;

            // TODO: Consider whether this would be better done in the updater thread itself.
            if( page instanceof Attachment ) {
                text = getAttachmentContent( ( Attachment )page );
            } else {
                text = m_engine.getManager( PageManager.class ).getPureText( page );
            }

            if( text != null ) {
                // Add work item to m_updates queue.
                final Object[] pair = new Object[2];
                pair[0] = page;
                pair[1] = text;
                m_updates.add( pair );
                log.debug("Scheduling page " + page.getName() + " for index update");
            }
        }
    }

    /**
     *  {@inheritDoc}
     */
    @Override
    public Collection< SearchResult > findPages( final String query, final Context wikiContext ) throws ProviderException {
        return findPages( query, FLAG_CONTEXTS, wikiContext );
    }

    /**
     *  Create contexts also.  Generating contexts can be expensive,
     *  so they're not on by default.
     */
    public static final int FLAG_CONTEXTS = 0x01;

    /**
     *  Searches pages using a particular combination of flags.
     *
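     *  <p>A minimal usage sketch (the {@code provider} and {@code context} variables are hypothetical and assumed
     *  to have been set up by the caller):</p>
     *  <pre>{@code
     *  Collection<SearchResult> results = provider.findPages( "lucene AND index", LuceneSearchProvider.FLAG_CONTEXTS, context );
     *  if( results != null ) {
     *      for( SearchResult r : results ) {
     *          System.out.println( r.getPage().getName() + " (score " + r.getScore() + ")" );
     *      }
     *  }
     *  }</pre>
     *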
     *  @param query The query to perform in Lucene query language
     *  @param flags A bitwise combination of flags; currently only {@link #FLAG_CONTEXTS} is defined
     *  @param wikiContext The context of the current request, used for permission checking
     *  @return A Collection of SearchResult instances
     *  @throws ProviderException if there is a problem with the backend
     */
    public Collection< SearchResult > findPages( final String query, final int flags, final Context wikiContext ) throws ProviderException {
        ArrayList<SearchResult> list = null;
        Highlighter highlighter = null;

        try( final Directory luceneDir = new NIOFSDirectory( new File( m_luceneDirectory ).toPath() );
             final IndexReader reader = DirectoryReader.open( luceneDir ) ) {
            final String[] queryfields = { LUCENE_PAGE_CONTENTS, LUCENE_PAGE_NAME, LUCENE_AUTHOR, LUCENE_ATTACHMENTS, LUCENE_PAGE_KEYWORDS };
            final QueryParser qp = new MultiFieldQueryParser( queryfields, getLuceneAnalyzer() );
            final Query luceneQuery = qp.parse( query );
            final IndexSearcher searcher = new IndexSearcher( reader, searchExecutor );

            if( (flags & FLAG_CONTEXTS) != 0 ) {
                highlighter = new Highlighter(new SimpleHTMLFormatter("<span class=\"searchmatch\">", "</span>"),
                                              new SimpleHTMLEncoder(),
                                              new QueryScorer( luceneQuery ) );
            }

            final ScoreDoc[] hits = searcher.search(luceneQuery, MAX_SEARCH_HITS).scoreDocs;
            final AuthorizationManager mgr = m_engine.getManager( AuthorizationManager.class );

            list = new ArrayList<>(hits.length);
            for( final ScoreDoc hit : hits ) {
                final int docID = hit.doc;
                final Document doc = searcher.doc( docID );
                final String pageName = doc.get( LUCENE_ID );
                final Page page = m_engine.getManager( PageManager.class ).getPage( pageName, PageProvider.LATEST_VERSION );

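                // Show the hit only if the page still exists and the user is allowed to view it;
                // stale entries are pruned from the index below.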
                if( page != null ) {
                    final PagePermission pp = new PagePermission( page, PagePermission.VIEW_ACTION );
                    if( mgr.checkPermission( wikiContext.getWikiSession(), pp ) ) {
                        final int score = ( int )( hit.score * 100 );

                        // Get highlighted search contexts
                        final String text = doc.get( LUCENE_PAGE_CONTENTS );

                        String[] fragments = new String[ 0 ];
                        if( text != null && highlighter != null ) {
                            final TokenStream tokenStream = getLuceneAnalyzer()
                                    .tokenStream( LUCENE_PAGE_CONTENTS, new StringReader( text ) );
                            fragments = highlighter.getBestFragments( tokenStream, text, MAX_FRAGMENTS );
                        }

                        final SearchResult result = new SearchResultImpl( page, score, fragments );
                        list.add( result );
                    }
                } else {
                    log.error( "Lucene found a result page '" + pageName + "' that could not be loaded, removing from Lucene cache" );
                    pageRemoved( Wiki.contents().page( m_engine, pageName ) );
                }
            }
        } catch( final IOException e ) {
            log.error("Failed during Lucene search", e);
        } catch( final ParseException e ) {
            log.info("Broken query; cannot parse query: " + query, e);
            throw new ProviderException( "You have entered a query Lucene cannot process [" + query + "]: " + e.getMessage() );
        } catch( final InvalidTokenOffsetsException e ) {
            log.error("Tokens are incompatible with provided text", e);
        }

        return list;
    }

    /**
     *  {@inheritDoc}
     */
    @Override
    public String getProviderInfo()
    {
        return "LuceneSearchProvider";
    }

    /**
     * Updater thread that updates Lucene indexes.
     */
    private static final class LuceneUpdater extends WikiBackgroundThread {
        static final int INDEX_DELAY    = 5;
        static final int INITIAL_DELAY = 60;
        private final LuceneSearchProvider m_provider;

        private final int m_initialDelay;

        private WatchDog m_watchdog;

        private LuceneUpdater( final Engine engine, final LuceneSearchProvider provider, final int initialDelay, final int indexDelay ) {
            super( engine, indexDelay );
            m_provider = provider;
            m_initialDelay = initialDelay;
            setName("JSPWiki Lucene Indexer");
        }

        @Override
        public void startupTask() throws Exception {
            m_watchdog = WatchDog.getCurrentWatchDog( getEngine() );

            // Sleep initially...
            try {
                Thread.sleep( m_initialDelay * 1000L );
            } catch( final InterruptedException e ) {
                throw new InternalWikiException("Interrupted while waiting to start.", e);
            }

            m_watchdog.enterState( "Full reindex" );
            // Reindex everything
            m_provider.doFullLuceneReindex();
            m_watchdog.exitState();
        }

        @Override
        public void backgroundTask() {
            m_watchdog.enterState("Emptying index queue", 60);

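            // Drain the whole queue while holding its lock, so that concurrent reindexPage() calls
            // wait until this pass completes rather than interleaving with it.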
            synchronized ( m_provider.m_updates ) {
                while( m_provider.m_updates.size() > 0 ) {
                    final Object[] pair = m_provider.m_updates.remove(0);
                    final Page page = ( Page ) pair[0];
                    final String text = ( String ) pair[1];
                    m_provider.updateLuceneIndex(page, text);
                }
            }

            m_watchdog.exitState();
        }

    }

    // FIXME: This class is dumb; needs to have a better implementation
    private static class SearchResultImpl implements SearchResult {

        private final Page     m_page;
        private final int      m_score;
        private final String[] m_contexts;

        public SearchResultImpl( final Page page, final int score, final String[] contexts ) {
            m_page = page;
            m_score = score;
            m_contexts = contexts != null ? contexts.clone() : null;
        }

        @Override
        public Page getPage()
        {
            return m_page;
        }

        /* (non-Javadoc)
         * @see org.apache.wiki.SearchResult#getScore()
         */
        @Override
        public int getScore()
        {
            return m_score;
        }

        @Override
        public String[] getContexts()
        {
            return m_contexts;
        }
    }

}