Sphinx源码学习笔记(二):查询关键词

栏目: 编程工具 · 发布时间: 6年前

内容简介:继续上一篇索引创建流程完成,学习理解一下查询关键词的逻辑处理流程。   索引创建流程可参考:https://cloud.tencent.com/developer/article/1150671索引查询主要源码是在searchd.cpp文件中。

前言

  继续上一篇索引创建流程完成,学习理解一下查询关键词的逻辑处理流程。   索引创建流程可参考:https://cloud.tencent.com/developer/article/1150671

索引查询

 索引查询主要源码是在searchd.cpp文件中。

创建索引对象

// NOTE(review): excerpt of the local-index registration logic from searchd.cpp;
// 'hIndex' (config section) and 'szIndexName' are parameters of the enclosing
// function, which is not shown in this snippet.
ServedDesc_t tIdx;
    // check path
    if ( !hIndex.Exists ( "path" ) )
    {
        sphWarning ( "index '%s': key 'path' not found - NOT SERVING", szIndexName );
        return ADD_ERROR;
    }

    // check name: refuse duplicate index names in the global hash
    if ( g_pLocalIndexes->Exists ( szIndexName ) )
    {
        sphWarning ( "index '%s': duplicate name - NOT SERVING", szIndexName );
        return ADD_ERROR;
    }

    // configure memlocking, star
    ConfigureLocalIndex ( tIdx, hIndex );

    // try to create index
    tIdx.m_sIndexPath = hIndex["path"].strval();
    PreCreatePlainIndex ( tIdx, szIndexName );
    tIdx.m_pIndex->SetCacheSize ( g_iMaxCachedDocs, g_iMaxCachedHits );
    CSphIndexStatus tStatus;
    tIdx.m_pIndex->GetStatus ( &tStatus );
    tIdx.m_iMass = CalculateMass ( tStatus ); // relative weight by access speed

    // done: register the descriptor under the index name
    if ( !g_pLocalIndexes->Add ( tIdx, szIndexName ) )
    {
        sphWarning ( "INTERNAL ERROR: index '%s': hash add failed - NOT SERVING", szIndexName );
        return ADD_ERROR;
    }

    // leak pointer, so its destructor won't delete it
    // (ownership was transferred to the copy stored in g_pLocalIndexes)
    tIdx.m_pIndex = NULL;
    return ADD_LOCAL;
  • ServedDesc_t是保存查询过程中的上下文对象,定义如下:主要保存查询索引对象的指针、索引路径、索引新的路径(用于增量更新索引)以及其它属性字段信息。
/// per-index context kept by searchd for every served local index
struct ServedDesc_t
{
  CSphIndex *            m_pIndex;        ///< the physical index object (owning; cleared after handing off to the global hash)
  CSphString            m_sIndexPath;    ///< current index file path
  CSphString            m_sNewPath;        ///< incoming path (used for index rotation/update)
  bool                m_bEnabled;        ///< to disable index in cases when rotation fails
  bool                m_bMlock;        ///< mlock index data in RAM (config "mlock")
  bool                m_bPreopen;        ///< keep index files open between queries (config "preopen")
  bool                m_bExpand;        ///< expand keywords (config "expand_keywords")
  bool                m_bToDelete;    ///< scheduled for removal -- TODO(review) confirm lifecycle
  bool                m_bOnlyNew;        ///< presumably: index exists in config only, not on disk yet -- confirm
  bool                m_bRT;            ///< true for real-time indexes
  CSphString            m_sGlobalIDFPath;    ///< path to the global IDF file (config "global_idf")
  bool                m_bOnDiskAttrs;    ///< keep attributes on disk (config "ondisk_attrs"=1)
  bool                m_bOnDiskPools;    ///< keep MVA/string pools on disk (config "ondisk_attrs"=pool)
  int64_t                m_iMass; // relative weight (by access speed) of the index
};
  • 调用ConfigureLocalIndex加载配置文件中的配置信息
/// copy per-index settings out of the config section into the served descriptor
void ConfigureLocalIndex ( ServedDesc_t & tIdx, const CSphConfigSection & hIndex )
{
  // memlock only when the index asks for it AND the daemon-wide --nolock is off
  const bool bMlockWanted = ( hIndex.GetInt ( "mlock", 0 )!=0 );
  tIdx.m_bMlock = bMlockWanted && !g_bOptNoLock;

  tIdx.m_bExpand = ( hIndex.GetInt ( "expand_keywords", 0 )!=0 );
  tIdx.m_bPreopen = ( hIndex.GetInt ( "preopen", 0 )!=0 );
  tIdx.m_sGlobalIDFPath = hIndex.GetStr ( "global_idf" );

  // "ondisk_attrs" accepts either the integer 1 or the string "pool"
  tIdx.m_bOnDiskAttrs = ( hIndex.GetInt ( "ondisk_attrs", 0 )==1 );
  tIdx.m_bOnDiskPools = ( strcmp ( hIndex.GetStr ( "ondisk_attrs", "" ), "pool" )==0 );

  // daemon-wide switches may force either on-disk mode on
  tIdx.m_bOnDiskAttrs |= g_bOnDiskAttrs;
  tIdx.m_bOnDiskPools |= g_bOnDiskPools;
}
  • 调用PreCreatePlainIndex创建真正索引对象,对应的实际处理类是CSphIndex_VLN类对象,创建好对象后再做一些属性字段的设置。
/// instantiate the physical index object and push the parsed per-index
/// settings down into it; the index starts out disabled and only becomes
/// serviceable after a successful preread
void PreCreatePlainIndex ( ServedDesc_t & tServed, const char * sName )
{
  CSphIndex * pIndex = sphCreateIndexPhrase ( sName, tServed.m_sIndexPath.cstr() );
  tServed.m_pIndex = pIndex;

  // propagate descriptor settings into the index object
  pIndex->m_bExpandKeywords = tServed.m_bExpand;
  pIndex->m_iExpansionLimit = g_iExpansionLimit;
  pIndex->SetPreopen ( tServed.m_bPreopen || g_bPreopenIndexes );
  pIndex->SetGlobalIDFPath ( tServed.m_sGlobalIDFPath );
  pIndex->SetMemorySettings ( tServed.m_bMlock, tServed.m_bOnDiskAttrs, tServed.m_bOnDiskPools );

  // not serving yet; enabled only after preread succeeds
  tServed.m_bEnabled = false;
}
/// factory for the plain (disk-based) index implementation
CSphIndex * sphCreateIndexPhrase ( const char* szIndexName, const char * sFilename )
{
  CSphIndex * pResult = new CSphIndex_VLN ( szIndexName, sFilename );
  return pResult;
}
  • 创建完成索引对象以后,根据配置文件配置缓存的大小来设置缓存的空间,最后把创建的索引上下文对象加入到全局索引对象g_pLocalIndexes对象中,因为一个searchd程序可以同时处理多个索引文件,因此需要一个容器来保存创建的索引对象。g_pLocalIndexes是一个索引哈希表,key为索引名称,value为索引上下文对象。

初始化索引对象

/// this gets called for every new physical index
/// that is, local and RT indexes, but not distributed once
///
/// preallocates (mmaps/loads) the on-disk data of a freshly created index;
/// returns false and logs a warning when preallocation fails
bool PrereadNewIndex ( ServedDesc_t & tIdx, const CSphConfigSection & hIndex, const char * szIndexName )
{
    bool bOk = tIdx.m_pIndex->Prealloc ( g_bStripPath );
    if ( !bOk )
    {
        sphWarning ( "index '%s': prealloc: %s; NOT SERVING", szIndexName, tIdx.m_pIndex->GetLastError().cstr() );
        return false;
    }

    // FIX: the success path previously fell off the end of a bool-returning
    // function (undefined behavior in C++); report success explicitly
    return true;
}
  • 创建索引后紧接着就是调用PrereadNewIndex()函数对新创建的索引进行预读取数据加载操作了,在函数内部实际调用的是创建索引对象的CSphIndex_VLN::Prealloc()函数进行处理,这个函数内部处理的事情比较多,我们一步步分析看下,以下代码都是在CSphIndex_VLN::Prealloc函数中。
// NOTE(review): excerpt from CSphIndex_VLN::Prealloc(); the enclosing function
// header is not part of this snippet, and the trailing '}' closes a scope
// opened outside the excerpt.
CSphEmbeddedFiles tEmbeddedFiles;
    // preload schema: the .sph header carries the index settings and schema
    if ( !LoadHeader ( GetIndexFileName("sph").cstr(), bStripPath, tEmbeddedFiles, m_sLastWarning ) )
        return false;

    tEmbeddedFiles.Reset();

    // preopen: keep doclist (.spd) and hitlist (.spp) handles open across queries
    if ( m_bKeepFilesOpen )
    {
        if ( m_tDoclistFile.Open ( GetIndexFileName("spd"), SPH_O_READ, m_sLastError ) < 0 )
            return false;

        // pre-v3 indexes kept hits inside the doclist file
        if ( m_tHitlistFile.Open ( GetIndexFileName ( m_uVersion>=3 ? "spp" : "spd" ), SPH_O_READ, m_sLastError ) < 0 )
            return false;
    }

    /////////////////////
    // prealloc wordlist
    /////////////////////
    // might be no dictionary at this point for old index format
    bool bWordDict = m_pDict && m_pDict->GetSettings().m_bWordDict;

    // only checkpoint and wordlist infixes are actually read here; dictionary itself is just mapped
    if ( !m_tWordlist.Preread ( GetIndexFileName("spi").cstr() , m_uVersion, bWordDict, m_sLastError ) )
        return false;
}
  • 调用LoadHeader来读取sph文件,sph文件保存的是索引头部信息保存后续索引的配置属性方面的字段,读取设置这些关键字段为后续查询做准备。
  • 根据配置文件是否保持文件打开状态,来进行spd和spp文件的打开操作。如果此配置为关闭则每次真正查询的时候再打开文件。
  • 调用CWordlist::Preread函数读取spi文件里面的词信息加载到m_tWordlist变量,CWordlist里面保存一个词到spd文件中文档ID的地址偏移,后续根据这个偏移查找查询词对应的文档ID列表。注意读取spi文件并不是读取全部词的内容信息,而是读取文件中的Checkpoint数据块的信息和中缀索引的InfixHash数据块信息。根据Checkpoint块信息建立一个关键词内存索引。再根据InfixHash数据建立一个基本词条到Checkpoint的1对n的映射关系。如果配置了中缀索引,则查找某个关键词的时候先把关键词在InfixHash表里查找,然后根据Checkpoint块中记录的wordid块的地址读取wordid信息进行定位查找。这样做的好处有节省内存,提高查找效率。
// NOTE(review): Prealloc() excerpt -- attribute-data setup for the
// extern-docinfo storage mode
if ( m_tSettings.m_eDocinfo==SPH_DOCINFO_EXTERN && !m_bIsEmpty )
{
    /////////////
    // attr data
    /////////////

    // one docinfo row = document id + attribute values
    int iStride = DOCINFO_IDSIZE + m_tSchema.GetRowSize();

    // map the .spa attribute file
    if ( !m_tAttr.Setup ( GetIndexFileName("spa").cstr(), m_sLastError, true ) )
        return false;

    int64_t iDocinfoSize = m_tAttr.GetLengthBytes();
    if ( iDocinfoSize<0 )
        return false;
    iDocinfoSize = iDocinfoSize / sizeof(DWORD); // bytes -> DWORD entries
    // m_iMinMaxIndex appears to mark where the per-block min/max rows start;
    // everything before it is real docinfo -- TODO(review) confirm vs writer code
    int64_t iRealDocinfoSize = m_iMinMaxIndex ? m_iMinMaxIndex : iDocinfoSize;
    m_iDocinfo = iRealDocinfoSize / iStride;

    // min/max rows come in pairs, hence the extra /2
    m_iDocinfoIndex = ( ( iDocinfoSize - iRealDocinfoSize ) / iStride / 2 ) - 1;
    m_pDocinfoIndex = m_tAttr.GetWritePtr() + m_iMinMaxIndex;

    // prealloc docinfo hash but only if docinfo is big enough (in other words if hash is 8x+ less in size)
    if ( m_tAttr.GetLengthBytes() > ( 32 << DOCINFO_HASH_BITS ) && !m_bDebugCheck )
    {
        if ( !m_tDocinfoHash.Alloc ( ( 1 << DOCINFO_HASH_BITS )+4, m_sLastError ) )
            return false;
    }

    // MVA data (.spm), present since format version 4
    if ( m_uVersion>=4 )
    {
        if ( !m_tMva.Setup ( GetIndexFileName("spm").cstr(), m_sLastError, false ) )
            return false;

        // MVA updates address entries with 32-bit ints; disable them when too many
        if ( m_tMva.GetNumEntries()>INT_MAX )
        {
            m_bArenaProhibit = true;
            sphWarning ( "MVA update disabled (loaded MVA " INT64_FMT ", should be less %d)", m_tMva.GetNumEntries(), INT_MAX );
        }
    }

    // string attributes (.sps), present since format version 17
    if ( m_uVersion>=17 && !m_tString.Setup ( GetIndexFileName("sps").cstr(), m_sLastError, true ) )
            return false;
}
  • 如果索引配置的是SPH_DOCINFO_EXTERN模式,则开始通过spa文件读取文档的属性信息加载到m_tAttr变量,然后根据不同的版本号读取对应spm和sps文件信息。
// prealloc killlist (.spk), present since format version 10
    if ( m_uVersion>=10 )
    {
        // FIXME!!! m_bId32to64
        if ( !m_tKillList.Setup ( GetIndexFileName("spk").cstr(), m_sLastError, false ) )
            return false;
    }

    // prealloc skiplist (.spe); skipped when running a debug check
    if ( !m_bDebugCheck && m_bHaveSkips && !m_tSkiplists.Setup ( GetIndexFileName("spe").cstr(), m_sLastError, false ) )
            return false;

    // almost done
    m_bPassedAlloc = true;
    m_iIndexTag = ++m_iIndexTagSeq; // presumably a unique tag per alloc generation -- confirm
  • 继续根据不同的版本号读取对应的spk、spe等信息,基本上就是对应我们创建索引时产生的各个文件信息。读取完成这些信息后索引对象的初始化就已经完成可以提供基本的查询逻辑了。

根据关键词查询索引

/// one regular query vs many sorters
/// NOTE(review): only the definition header is shown in this excerpt;
/// the body is reproduced piecewise in the following snippets
bool CSphIndex_VLN::MultiQuery ( const CSphQuery * pQuery, CSphQueryResult * pResult,
    int iSorters, ISphMatchSorter ** ppSorters, const CSphMultiQueryArgs & tArgs ) const
  • 真正实现查询关键词的函数是CSphIndex_VLN::MultiQuery函数,该函数有五个参数:
    • pQuery 封装了查询所需要一系列的参数
    • pResult 查询结果的返回值
    • ppSorters 提供排序器对象,是一个排序器指针数组。
    • iSorters 表示ppSorters数组的大小
    • tArgs 提供多值查询的参数信息
// NOTE(review): MultiQuery() excerpt -- sModifiedQuery, pDict, pProfile,
// dSorters and pNodeCache are defined in elided parts of the function.
//
// parse the extended-syntax query text into an expression tree
XQQuery_t tParsed;
    if ( !sphParseExtendedQuery ( tParsed, (const char*)sModifiedQuery, pQuery, m_pQueryTokenizer, &m_tSchema, pDict, m_tSettings ) )
    {
        // FIXME? might wanna reset profile to unknown state
        pResult->m_sError = tParsed.m_sParseError;
        return false;
    }
    if ( !tParsed.m_sParseWarning.IsEmpty() )
        pResult->m_sWarning = tParsed.m_sParseWarning;

    // transform query if needed (quorum transform, etc.)
    if ( pProfile )
        pProfile->Switch ( SPH_QSTATE_TRANSFORMS );
    sphTransformExtendedQuery ( &tParsed.m_pRoot, m_tSettings, pQuery->m_bSimplify, this );

    // optional keyword expansion (expand_keywords setting)
    if ( m_bExpandKeywords )
    {
        tParsed.m_pRoot = sphQueryExpandKeywords ( tParsed.m_pRoot, m_tSettings );
        tParsed.m_pRoot->Check ( true );
    }

    // this should be after keyword expansion
    if ( m_tSettings.m_uAotFilterMask )
        TransformAotFilter ( tParsed.m_pRoot, pDict->GetWordforms(), m_tSettings );

    // record the current per-word stats; passed into ParsedMultiQuery below
    SphWordStatChecker_t tStatDiff;
    tStatDiff.Set ( pResult->m_hWordStats );

    // expanding prefix in word dictionary case
    CSphScopedPayload tPayloads;
    XQNode_t * pPrefixed = ExpandPrefix ( tParsed.m_pRoot, pResult, &tPayloads, pQuery->m_uDebugFlags );
    if ( !pPrefixed )
        return false;
    tParsed.m_pRoot = pPrefixed;

    // guard against pathologically deep query trees
    if ( !sphCheckQueryHeight ( tParsed.m_pRoot, pResult->m_sError ) )
        return false;

    // flag common subtrees (only worthwhile when subtree caching is enabled)
    int iCommonSubtrees = 0;
    if ( m_iMaxCachedDocs && m_iMaxCachedHits )
        iCommonSubtrees = sphMarkCommonSubtrees ( 1, &tParsed );

    tParsed.m_bNeedSZlist = pQuery->m_bZSlist;

    // evaluate the parsed tree against all sorters at once
    CSphQueryNodeCache tNodeCache ( iCommonSubtrees, m_iMaxCachedDocs, m_iMaxCachedHits );
    bool bResult = ParsedMultiQuery ( pQuery, pResult, iSorters, &dSorters[0], tParsed, pDict, tArgs, &tNodeCache, tStatDiff );
  • 用tParsed解析输入的查询词构造一棵查询树,该树是一棵N叉树,每个叶子节点代表一个关键词信息,中间节点类似一种与或关系运算之类的操作符。该查询树的具体数据结构需要深入调试代码才能容易理解些,目前因为时间关系只是大概了解该树的基本模型,可以看成树的每个叶子节点都是一个查询词,叶子的父节点是一种对应关系操作符,用来对查询词的结果做逻辑运算使用。查询树构造好以后再做一些设置检查和参数的封装,最后调用ParsedMultiQuery函数来进行下一步操作。
  • 这里要注意下ExpandPrefix函数的调用,如果使用通配符,一般是配置启用中缀索引功能的时候,会进入这个函数通过开始创建索引时候根据spi文件构建的中缀索引哈希表进行关键词的定位查询,如果没有启用中缀索引此函数会直接返回。
// open files
    // NOTE(review): MultiQuery() excerpt -- when preopen is disabled, the
    // doclist/hitlist files are opened per-query on these temporary handles
    CSphAutofile tDoclist, tHitlist;
    if ( !m_bKeepFilesOpen )
    {
        if ( pProfile )
            pProfile->Switch ( SPH_QSTATE_OPEN );

        if ( tDoclist.Open ( GetIndexFileName("spd"), SPH_O_READ, pResult->m_sError ) < 0 )
            return false;

        // pre-v3 indexes kept hits inside the doclist file
        if ( tHitlist.Open ( GetIndexFileName ( m_uVersion>=3 ? "spp" : "spd" ), SPH_O_READ, pResult->m_sError ) < 0 )
            return false;
    }
  • 检查初始化的时候有没有打开spd和spp文件句柄,没有的话打开一个临时文件句柄供后续查询使用。
// setup search terms
    // NOTE(review): excerpt -- tCtx, iMaxSchemaIndex and pNodeCache come from
    // elided parts of the enclosing function.
    // the setup object carries everything a per-keyword reader needs
    DiskIndexQwordSetup_c tTermSetup ( m_bKeepFilesOpen ? m_tDoclistFile : tDoclist,
        m_bKeepFilesOpen ? m_tHitlistFile : tHitlist,
        m_tSkiplists.GetWritePtr(), pProfile );

    tTermSetup.m_pDict = pDict;
    tTermSetup.m_pIndex = this;
    tTermSetup.m_eDocinfo = m_tSettings.m_eDocinfo;
    tTermSetup.m_uMinDocid = m_uMinDocid;
    // inline docinfo stores attribute values alongside the doclist rows
    if ( m_tSettings.m_eDocinfo==SPH_DOCINFO_INLINE )
    {
        tTermSetup.m_iInlineRowitems = m_tSchema.GetRowSize();
        tTermSetup.m_pMinRow = m_dMinRow.Begin();
    }
    tTermSetup.m_iDynamicRowitems = ppSorters[iMaxSchemaIndex]->GetSchema().GetDynamicSize();

    if ( pQuery->m_uMaxQueryMsec>0 )
        tTermSetup.m_iMaxTimer = sphMicroTimer() + pQuery->m_uMaxQueryMsec*1000; // max_query_time
    tTermSetup.m_pWarning = &pResult->m_sWarning;
    tTermSetup.m_bSetupReaders = true;
    tTermSetup.m_pCtx = &tCtx;
    tTermSetup.m_pNodeCache = pNodeCache;

    // setup prediction constrain
    CSphQueryStats tQueryStats;
    bool bCollectPredictionCounters = ( pQuery->m_iMaxPredictedMsec>0 );
    int64_t iNanoBudget = (int64_t)(pQuery->m_iMaxPredictedMsec) * 1000000; // from milliseconds to nanoseconds
    tQueryStats.m_pNanoBudget = &iNanoBudget;
    if ( bCollectPredictionCounters )
        tTermSetup.m_pStats = &tQueryStats;
  • 创建一个查询关键词的配置器,用于记录查询词条上下文设置。
// setup query
    // must happen before index-level reject, in order to build proper keyword stats
    // (the ranker constructor presumably resolves every keyword against the
    // dictionary -- see the ExtRanker_c / QwordSetup code paths)
    CSphScopedPtr<ISphRanker> pRanker ( sphCreateRanker ( tXQ, pQuery, pResult, tTermSetup, tCtx, ppSorters[iMaxSchemaIndex]->GetSchema() ) );
    if ( !pRanker.Ptr() )
        return false;
  • 创建一个排序对象,注意这一步非常的关键,对查询关键词ID在spi文件的定位以及对spd文件文档ID的定位都是在sphCreateRanker创建pRanker的构造函数隐式调用完成的,下面把调用关键流程代码贴出。
// one ranker flavor per ranking mode; the MATCHANY case shown as an example
case SPH_RANK_MATCHANY:        pRanker = new ExtRanker_T < RankerState_MatchAny_fn > ( tXQ, tTermSetup ); break;
  • 根据不同的查询模式,创建不同的排序类对象,默认使用的SPH_RANK_MATCHANY模式。
/// ranker constructor: sizes the match buffers, registers itself as the
/// zone checker, and builds the evaluation tree mirroring the parsed query
ExtRanker_c::ExtRanker_c ( const XQQuery_t & tXQ, const ISphQwordSetup & tSetup )
	: m_dZoneInfo ( 0 )
{
	// the setup context must carry a valid query context
	assert ( tSetup.m_pCtx );

	m_iInlineRowitems = tSetup.m_iInlineRowitems;

	// pre-size every match slot with the sorter's dynamic row width
	for ( int iMatch=0; iMatch<ExtNode_i::MAX_DOCS; iMatch++ )
	{
		m_dMatches[iMatch].Reset ( tSetup.m_iDynamicRowitems );
		m_dMyMatches[iMatch].Reset ( tSetup.m_iDynamicRowitems );
	}
	m_tTestMatch.Reset ( tSetup.m_iDynamicRowitems );

	// hook ourselves up as the zone checker, then build the node tree
	assert ( tXQ.m_pRoot );
	tSetup.m_pZoneChecker = this;
	m_pRoot = ExtNode_i::Create ( tXQ.m_pRoot, tSetup );
}
  • 在创建类对象构造函数里面,根据传进来的查询树和查询词的上下文通过ExtNode_i::Create函数创建一个ExtNode_i对象。
// NOTE(review): truncated excerpt -- 'iChildren' and the leaf-node handling
// are defined in elided parts of this function.
ExtNode_i * ExtNode_i::Create ( const XQNode_t * pNode, const ISphQwordSetup & tSetup )
    {
        // generic create
        // fold all children left-to-right under the node's operator
        ExtNode_i * pCur = NULL;
        for ( int i=0; i<iChildren; i++ )
        {
            ExtNode_i * pNext = ExtNode_i::Create ( pNode->m_dChildren[i], tSetup );
            if ( !pNext ) continue;
            if ( !pCur )
            {
                pCur = pNext;
                continue;
            }
            // combine the accumulated subtree with the next child
            switch ( pNode->GetOp() )
            {
                case SPH_QUERY_OR:            pCur = new ExtOr_c ( pCur, pNext, tSetup ); break;
                case SPH_QUERY_MAYBE:        pCur = new ExtMaybe_c ( pCur, pNext, tSetup ); break;
                case SPH_QUERY_AND:            pCur = new ExtAnd_c ( pCur, pNext, tSetup ); break;
                case SPH_QUERY_ANDNOT:        pCur = new ExtAndNot_c ( pCur, pNext, tSetup ); break;
                case SPH_QUERY_SENTENCE:    pCur = new ExtUnit_c ( pCur, pNext, pNode->m_dSpec.m_dFieldMask, tSetup, MAGIC_WORD_SENTENCE ); break;
                case SPH_QUERY_PARAGRAPH:    pCur = new ExtUnit_c ( pCur, pNext, pNode->m_dSpec.m_dFieldMask, tSetup, MAGIC_WORD_PARAGRAPH ); break;
                default:                    assert ( 0 && "internal error: unhandled op in ExtNode_i::Create()" ); break;
            }
        }
        // wrap in a caching proxy when this subtree was flagged as common
        if ( pCur && pNode->GetCount() )
            return tSetup.m_pNodeCache->CreateProxy ( pCur, pNode, tSetup );
        return pCur;
    }
  • 在ExtNode_i::Create函数里面会根据查询树的每一个子节点分别创建对应子节点ExtNode_i对象,注意查询树的每一个子节点都是一个关键查询词,在此处调用的是ExtNode_i::Create的不同参数的重载函数。
  • 注意如果是中间节点,中间节点一般对应的是各种关系操作,会创建不同的关系操作对象。
/// keyword overload: spawn the low-level query word first,
/// then wrap it into a tree node via the qword overload
ExtNode_i * ExtNode_i::Create ( const XQKeyword_t & tWord, const XQNode_t * pNode, const ISphQwordSetup & tSetup )
{
    ISphQword * pQword = CreateQueryWord ( tWord, tSetup );
    return Create ( pQword, pNode, tSetup );
}
  • 上一步对查询树每个子节点调用的ExtNode_i::Create函数里面调用的是CreateQueryWord()函数。
/// build and configure a low-level query word (ISphQword) for one keyword
static ISphQword * CreateQueryWord ( const XQKeyword_t & tWord, const ISphQwordSetup & tSetup, CSphDict * pZonesDict=NULL )
{
    // local, guaranteed-terminated copy of the keyword text; the dictionary
    // appears to rewrite this buffer in place (cf. GetWordIDNonStemmed),
    // so keep the lookup/m_sDictWord order below
    BYTE sWordBuf [ 3*SPH_MAX_WORD_LEN + 16 ];
    strncpy ( (char*)sWordBuf, tWord.m_sWord.cstr(), sizeof(sWordBuf) );
    sWordBuf[sizeof(sWordBuf)-1] = '\0';

    ISphQword * pQword = tSetup.QwordSpawn ( tWord );
    pQword->m_sWord = tWord.m_sWord;

    // zone queries carry their own dictionary; otherwise use the setup's one
    CSphDict * pDict = pZonesDict ? pZonesDict : tSetup.m_pDict;
    pQword->m_uWordID = tWord.m_bMorphed
        ? pDict->GetWordIDNonStemmed ( sWordBuf )
        : pDict->GetWordID ( sWordBuf );

    // keep the dictionary form (possibly changed by the lookup) alongside the raw form
    pQword->m_sDictWord = (char*)sWordBuf;
    pQword->m_bExpanded = tWord.m_bExpanded;
    tSetup.QwordSetup ( pQword );

    // field-position restrictions (field-start / field-end modifiers)
    if ( tWord.m_bFieldStart && tWord.m_bFieldEnd )
        pQword->m_iTermPos = TERM_POS_FIELD_STARTEND;
    else if ( tWord.m_bFieldStart )
        pQword->m_iTermPos = TERM_POS_FIELD_START;
    else if ( tWord.m_bFieldEnd )
        pQword->m_iTermPos = TERM_POS_FIELD_END;
    else
        pQword->m_iTermPos = TERM_POS_NONE;

    pQword->m_fBoost = tWord.m_fBoost;
    pQword->m_iAtomPos = tWord.m_iAtomPos;
    return pQword;
}
  • 在CreateQueryWord函数里面首先会定义一个ISphQword对象,并设置参数的上下文,最终调用tSetup.QwordSetup函数进行下一步操作,对应的调用函数是DiskIndexQwordSetup_c::QwordSetup函数。
/// entry point for per-word setup: prime the document cursor with the
/// index-wide minimal docid, then delegate to the disk-index Setup()
bool DiskIndexQwordSetup_c::QwordSetup ( ISphQword * pWord ) const
{
    // downcast to the disk-index specific qword flavor
    DiskIndexQwordTraits_c * pQword = (DiskIndexQwordTraits_c*)pWord;

    // setup attrs
    pQword->m_tDoc.Reset ( m_iDynamicRowitems );
    pQword->m_iMinID = m_uMinDocid;
    pQword->m_tDoc.m_uDocID = m_uMinDocid;

    return pQword->Setup ( this );
}
  • 在DiskIndexQwordSetup_c::QwordSetup函数里面继续做转化调用DiskIndexQwordTraits_c::Setup()函数处理。
// NOTE(review): truncated excerpt -- sWord, iWordLen, tWord, pIndex and
// bWordDict are derived in elided code at the top of the real function,
// and the success-path 'return' is also cut off here.
bool DiskIndexQwordSetup_c::Setup ( ISphQword * pWord ) const
{
    // locate the wordlist checkpoint block that may contain this keyword
    const CSphWordlistCheckpoint * pCheckpoint = pIndex->m_tWordlist.FindCheckpoint ( sWord, iWordLen, tWord.m_uWordID, false );
    if ( !pCheckpoint )
        return false;

    // decode wordlist chunk
    const BYTE * pBuf = pIndex->m_tWordlist.AcquireDict ( pCheckpoint );
    assert ( pBuf );

    CSphDictEntry tRes;
    if ( bWordDict )
    {
        // keywords dictionary: linear scan of the sorted block
        KeywordsBlockReader_c tCtx ( pBuf, m_pSkips!=NULL );
        while ( tCtx.UnpackWord() )
        {
            // block is sorted
            // so once keywords are greater than the reference word, no more matches
            assert ( tCtx.GetWordLen()>0 );
            int iCmp = sphDictCmpStrictly ( sWord, iWordLen, tCtx.GetWord(), tCtx.GetWordLen() );
            if ( iCmp<0 )
                return false;
            if ( iCmp==0 )
                break;
        }
        if ( tCtx.GetWordLen()<=0 )
            return false;
        tRes = tCtx;

    } else
    {
        // crc dictionary: direct lookup by word id
        if ( !pIndex->m_tWordlist.GetWord ( pBuf, tWord.m_uWordID, tRes ) )
            return false;
    }

        // hitless words keep a flag bit inside the docs counter
        const ESphHitless eMode = pIndex->m_tSettings.m_eHitless;
    tWord.m_iDocs = eMode==SPH_HITLESS_SOME ? ( tRes.m_iDocs & HITLESS_DOC_MASK ) : tRes.m_iDocs;
    tWord.m_iHits = tRes.m_iHits;
    tWord.m_bHasHitlist =
        ( eMode==SPH_HITLESS_NONE ) ||
        ( eMode==SPH_HITLESS_SOME && !( tRes.m_iDocs & HITLESS_DOC_FLAG ) );

    if ( m_bSetupReaders )
    {
        tWord.m_rdDoclist.SetBuffers ( g_iReadBuffer, g_iReadUnhinted );
        tWord.m_rdDoclist.SetFile ( m_tDoclist );
        tWord.m_rdDoclist.m_pProfile = m_pProfile;
        tWord.m_rdDoclist.m_eProfileState = SPH_QSTATE_READ_DOCS;

        // read in skiplist
        // OPTIMIZE? maybe cache hot decompressed lists?
        // OPTIMIZE? maybe add an option to decompress on preload instead?
        if ( m_pSkips && tRes.m_iDocs>SPH_SKIPLIST_BLOCK )
        {
            const BYTE * pSkip = m_pSkips + tRes.m_iSkiplistOffset;

            // first (implicit) skiplist entry: the doclist start itself
            tWord.m_dSkiplist.Add();
            tWord.m_dSkiplist.Last().m_iBaseDocid = 0;
            tWord.m_dSkiplist.Last().m_iOffset = tRes.m_iDoclistOffset;
            tWord.m_dSkiplist.Last().m_iBaseHitlistPos = 0;

            // remaining entries are delta-compressed against the previous one
            for ( int i=1; i<( tWord.m_iDocs/SPH_SKIPLIST_BLOCK ); i++ )
            {
                SkiplistEntry_t & t = tWord.m_dSkiplist.Add();
                SkiplistEntry_t & p = tWord.m_dSkiplist [ tWord.m_dSkiplist.GetLength()-2 ];
                t.m_iBaseDocid = p.m_iBaseDocid + SPH_SKIPLIST_BLOCK + (SphDocID_t) sphUnzipOffset ( pSkip );
                t.m_iOffset = p.m_iOffset + 4*SPH_SKIPLIST_BLOCK + sphUnzipOffset ( pSkip );
                t.m_iBaseHitlistPos = p.m_iBaseHitlistPos + sphUnzipOffset ( pSkip );
            }
        }

        // position the doclist reader right at this word's document list
        tWord.m_rdDoclist.SeekTo ( tRes.m_iDoclistOffset, tRes.m_iDoclistHint );

        tWord.m_rdHitlist.SetBuffers ( g_iReadBuffer, g_iReadUnhinted );
        tWord.m_rdHitlist.SetFile ( m_tHitlist );
        tWord.m_rdHitlist.m_pProfile = m_pProfile;
        tWord.m_rdHitlist.m_eProfileState = SPH_QSTATE_READ_HITS;
    }
}
  • DiskIndexQwordSetup_c::Setup函数是对每个查询词处理的关键所在,首先通过索引对象加载spi文件得到的m_tWordlist变量的FindCheckpoint函数找到该查询词对应的Checkpoint节点,还记得创建索引的时候我们说过通过Checkpoint节点记录的信息,可以找到该词在spi文件记录的位置,进而定位到对应spd文件的该词对应文档ID列表的位置信息。
  • pIndex->m_tWordlist.AcquireDict ( pCheckpoint )根据获取的pCheckpoint,来获取该词对应spi文件所在块的文件指针。
  • 根据不同的记录方式,再定位所在块的地址,再查找该词记录的CSphDictEntry信息结构体,回忆一下该结构体的定义。
/// dictionary entry
/// some of the fields might be unused depending on specific dictionary type
/// (dict=crc fills m_uWordID, dict=keywords fills m_sKeyword)
struct CSphDictEntry
{
    SphWordID_t        m_uWordID;            ///< keyword id (for dict=crc)
    const BYTE *    m_sKeyword;            ///< keyword text (for dict=keywords)
    int                m_iDocs;            ///< number of matching documents
    int                m_iHits;            ///< number of occurrences
    SphOffset_t        m_iDoclistOffset;    ///< absolute document list offset (into .spd)
    SphOffset_t        m_iDoclistLength;    ///< document list length in bytes
    SphOffset_t        m_iSkiplistOffset;    ///< absolute skiplist offset (into .spe)
    int                m_iDoclistHint;        ///< raw document list length hint value (0..255 range, 1 byte)
};
  • 取到查询词对应的CSphDictEntry信息后,我们就拿到最关键的信息了,可以当看到CSphDictEntry里面记录了该词命中的m_iDocs个数,m_iHits个数,对应spd文件文档列表的地址偏移m_iDoclistOffset,以及文档的长度m_iDoclistLength等等一些重要的字段信息。
  • 获取到查询词这些关键信息以后,就可以读取对应的文档id列表了,然后根据查询树词与词之间的关系做运算,比如如果是and那就取交集,即可得到查询关键词对应的文档id数据了。
  • 因时间有限索引查询部分先分析到这里,后续主要就是根据获取的文档ID,做一些排序打分各种优先性计算了,不是关键所在,等后续这个需求完成再完善补充更深入的理解。

以上就是本文的全部内容,希望对大家的学习有所帮助,也希望大家多多支持 码农网

查看所有标签

猜你喜欢:

本站部分资源来源于网络,本站转载出于传递更多信息之目的,版权归原作者或者来源机构所有,如转载稿涉及版权问题,请联系我们

Practical JavaScript, DOM Scripting and Ajax Projects

Practical JavaScript, DOM Scripting and Ajax Projects

Frank Zammetti / Apress / April 16, 2007 / $44.99

http://www.amazon.com/exec/obidos/tg/detail/-/1590598164/ Book Description Practical JavaScript, DOM, and Ajax Projects is ideal for web developers already experienced in JavaScript who want to ......一起来看看 《Practical JavaScript, DOM Scripting and Ajax Projects》 这本书的介绍吧!

JSON 在线解析
JSON 在线解析

在线 JSON 格式化工具

MD5 加密
MD5 加密

MD5 加密工具

正则表达式在线测试
正则表达式在线测试

正则表达式在线测试