merged rel21 branch (up to r4668) back into trunk

git-svn-id: svn://svn.sphinxsearch.com/sphinx/trunk@4669 406a0c4d-033a-0410-8de8-e80135713968
tomat 12 years ago
parent
commit 7bb91aa454

+ 9 - 2
src/searchd.cpp

@@ -15019,6 +15019,7 @@ void HandleMysqlInsert ( SqlRowBuffer_c & tOut, const SqlStmt_t & tStmt,
 	// fire exit
 	if ( !sError.IsEmpty() )
 	{
+		pIndex->RollBack(); // clean up collected data
 		pServed->Unlock();
 		tOut.Error ( tStmt.m_sStmt, sError.cstr() );
 		return;
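
The added RollBack() call matters because a failed INSERT otherwise leaves its half-collected rows sitting in the RT accumulator, where the next successful statement would silently commit them (the test_101 additions below exercise exactly this). A minimal sketch of the contract, with Accumulator and the method names as illustrative stand-ins for the real RT index interface:

#include <string>
#include <vector>

// Illustrative only: models the error-path contract of HandleMysqlInsert.
struct Accumulator
{
	std::vector<int> m_dRows; // rows collected by the current INSERT

	void Rollback () { m_dRows.clear(); } // discard partial state
	void Commit ()   { /* flush to the index, then */ m_dRows.clear(); }
};

bool Insert ( Accumulator & tAcc, int iRow, bool bValid, std::string & sError )
{
	tAcc.m_dRows.push_back ( iRow );
	if ( !bValid )
	{
		sError = "bad row";
		tAcc.Rollback(); // without this, iRow leaks into the next Commit()
		return false;
	}
	tAcc.Commit();
	return true;
}

int main ()
{
	Accumulator tAcc;
	std::string sError;
	Insert ( tAcc, 1, false, sError ); // fails; the accumulator must end up empty
	return (int)tAcc.m_dRows.size(); // 0
}
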
@@ -18237,6 +18238,7 @@ bool RotateIndexGreedy ( ServedIndex_t & tIndex, const char * sIndex )
 	}
 	sphLogDebug ( "RotateIndexGreedy: new index is readable" );
 
+	bool bNoMVP = true;
 	if ( !tIndex.m_bOnlyNew )
 	{
 		// rename current to old
@@ -18262,7 +18264,7 @@ bool RotateIndexGreedy ( ServedIndex_t & tIndex, const char * sIndex )
 
 			CSphString sFakeError;
 			CSphAutofile fdTest ( sBuf, SPH_O_READ, sFakeError );
-			bool bNoMVP = ( fdTest.GetFD()<0 );
+			bNoMVP = ( fdTest.GetFD()<0 );
 			fdTest.Close();
 			if ( bNoMVP )
 				break; ///< no file, nothing to hold
@@ -18291,9 +18293,14 @@ bool RotateIndexGreedy ( ServedIndex_t & tIndex, const char * sIndex )
 
 		// rollback old ones
 		if ( !tIndex.m_bOnlyNew )
-			for ( int j=0; j<=sphGetExtCount ( uVersion ); j++ ) ///< <=, not <, since we have the .mvp at the end of these lists
+		{
+			for ( int j=0; j<sphGetExtCount ( uVersion ); j++ )
 				TryRename ( sIndex, sPath, sphGetExts ( SPH_EXT_TYPE_OLD, uVersion )[j], sphGetExts ( SPH_EXT_TYPE_CUR, uVersion )[j], sAction, true );
 
+			if ( !bNoMVP )
+				TryRename ( sIndex, sPath, sphGetExt ( SPH_EXT_TYPE_OLD, SPH_EXT_MVP ), sphGetExt ( SPH_EXT_TYPE_CUR, SPH_EXT_MVP ), sAction, true );
+		}
+
 		return false;
 	}
 	sphLogDebug ( "RotateIndexGreedy: New renamed to current" );

+ 26 - 30
src/sphinx.cpp

@@ -16105,6 +16105,8 @@ void CSphIndex_VLN::Dealloc ()
 	m_tSettings.m_eDocinfo = SPH_DOCINFO_NONE;
 
 	m_bPreallocated = false;
+	SafeDelete ( m_pFieldFilter );
+	SafeDelete ( m_pQueryTokenizer );
 	SafeDelete ( m_pTokenizer );
 	SafeDelete ( m_pDict );
 
@@ -25911,25 +25913,25 @@ void CSphSource_Document::BuildSubstringHits ( SphDocID_t uDocid, bool bPayload,
 	// and compute fields lengths
 	if ( !bSkipEndMarker && !m_tState.m_bProcessingHits && m_tHits.Length() )
 	{
-		CSphWordHit * pHit = const_cast < CSphWordHit * > ( m_tHits.Last() );
-		Hitpos_t uRefPos = pHit->m_uWordPos;
+		CSphWordHit * pTail = const_cast < CSphWordHit * > ( m_tHits.Last() );
 
 		if ( m_pFieldLengthAttrs )
-			m_pFieldLengthAttrs [ HITMAN::GetField ( pHit->m_uWordPos ) ] = HITMAN::GetPos ( pHit->m_uWordPos );
-
-		for ( ; pHit>=m_tHits.First() && pHit->m_uWordPos==uRefPos; pHit-- )
-			HITMAN::SetEndMarker ( &pHit->m_uWordPos );
+			m_pFieldLengthAttrs [ HITMAN::GetField ( pTail->m_uWordPos ) ] = HITMAN::GetPos ( pTail->m_uWordPos );
 
-		// mark blended HEAD as trailing too
+		Hitpos_t uEndPos = pTail->m_uWordPos;
 		if ( iBlendedHitsStart>=0 )
 		{
 			assert ( iBlendedHitsStart>=0 && iBlendedHitsStart<m_tHits.Length() );
-			pHit = const_cast < CSphWordHit * > ( m_tHits.First()+iBlendedHitsStart );
-			uRefPos = pHit->m_uWordPos;
+			Hitpos_t uBlendedPos = ( m_tHits.First() + iBlendedHitsStart )->m_uWordPos;
+			uEndPos = Min ( uEndPos, uBlendedPos );
+		}
 
-			const CSphWordHit * pEnd = m_tHits.First()+m_tHits.Length();
-			for ( ; pHit<pEnd && pHit->m_uWordPos==uRefPos; pHit++ )
-				HITMAN::SetEndMarker ( &pHit->m_uWordPos );
+		// set end marker for all tail hits
+		const CSphWordHit * pStart = m_tHits.First();
+		while ( pStart<=pTail && uEndPos<=pTail->m_uWordPos )
+		{
+			HITMAN::SetEndMarker ( &pTail->m_uWordPos );
+			pTail--;
 		}
 	}
 }
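
Both end-marker rewrites (this one and the matching BuildRegularHits hunk below) collapse two special-cased loops into a single backward walk: take the position of the last hit, pull the cutoff back to the blended HEAD position when one exists, then mark every trailing hit at or past that cutoff. A minimal sketch of the walk, with a plain vector of positions standing in for CSphWordHit and a high bit standing in for HITMAN's end marker:

#include <algorithm>
#include <vector>

typedef unsigned int Hitpos_t;
static const Hitpos_t END_MARKER = 1u<<31; // illustrative stand-in for the field-end bit

static void MarkTail ( std::vector<Hitpos_t> & dHits, int iBlendedStart )
{
	if ( dHits.empty() )
		return;

	Hitpos_t uEndPos = dHits.back();
	if ( iBlendedStart>=0 )
		uEndPos = std::min ( uEndPos, dHits[iBlendedStart] ); // include the blended HEAD

	for ( int i=(int)dHits.size()-1; i>=0 && uEndPos<=dHits[i]; i-- )
		dHits[i] |= END_MARKER;
}

int main ()
{
	std::vector<Hitpos_t> dHits { 1, 2, 3, 3 };
	MarkTail ( dHits, -1 ); // both hits at position 3 get the marker
	return 0;
}
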
@@ -25950,7 +25952,6 @@ void CSphSource_Document::BuildRegularHits ( SphDocID_t uDocid, bool bPayload, b
 
 	// FIELDEND_MASK at last token stream should be set for HEAD token too
 	int iBlendedHitsStart = -1;
-	int iLastTokenStart = -1;
 
 	// index words only
 	while ( ( m_iMaxHits==0 || m_tHits.m_dData.GetLength()+BUILD_REGULAR_HITS_COUNT<m_iMaxHits )
@@ -25967,9 +25968,6 @@ void CSphSource_Document::BuildRegularHits ( SphDocID_t uDocid, bool bPayload, b
 				HITMAN::AddPos ( &m_tState.m_iHitPos, m_iBoundaryStep );
 		}
 
-		if ( m_tState.m_iBuildLastStep )
-			iLastTokenStart = m_tHits.Length();
-
 		if ( BuildZoneHits ( uDocid, sWord ) )
 			continue;
 
@@ -26029,27 +26027,25 @@ void CSphSource_Document::BuildRegularHits ( SphDocID_t uDocid, bool bPayload, b
 	// and compute field lengths
 	if ( !bSkipEndMarker && !m_tState.m_bProcessingHits && m_tHits.Length() )
 	{
-		CSphWordHit * pHit = const_cast < CSphWordHit * > ( m_tHits.Last() );
-		HITMAN::SetEndMarker ( &pHit->m_uWordPos );
+		CSphWordHit * pTail = const_cast < CSphWordHit * > ( m_tHits.Last() );
 
 		if ( m_pFieldLengthAttrs )
-			m_pFieldLengthAttrs [ HITMAN::GetField ( pHit->m_uWordPos ) ] = HITMAN::GetPos ( pHit->m_uWordPos );
+			m_pFieldLengthAttrs [ HITMAN::GetField ( pTail->m_uWordPos ) ] = HITMAN::GetPos ( pTail->m_uWordPos );
 
-		// mark blended HEAD as trailing too
+		Hitpos_t uEndPos = pTail->m_uWordPos;
 		if ( iBlendedHitsStart>=0 )
 		{
 			assert ( iBlendedHitsStart>=0 && iBlendedHitsStart<m_tHits.Length() );
-			CSphWordHit * pBlendedHit = const_cast < CSphWordHit * > ( m_tHits.First() + iBlendedHitsStart );
-			HITMAN::SetEndMarker ( &pBlendedHit->m_uWordPos );
-		} else if ( iLastTokenStart>=0 && iLastTokenStart+1<m_tHits.Length() )
+			Hitpos_t uBlendedPos = ( m_tHits.First() + iBlendedHitsStart )->m_uWordPos;
+			uEndPos = Min ( uEndPos, uBlendedPos );
+		}
+
+		// set end marker for all tail hits
+		const CSphWordHit * pStart = m_tHits.First();
+		while ( pStart<=pTail && uEndPos<=pTail->m_uWordPos )
 		{
-			CSphWordHit * pHit = const_cast < CSphWordHit * > ( m_tHits.First() + iLastTokenStart );
-			const CSphWordHit * pEnd = m_tHits.First() + m_tHits.Length() - 1;
-			while ( pHit<pEnd )
-			{
-				HITMAN::SetEndMarker ( &pHit->m_uWordPos );
-				pHit++;
-			}
+			HITMAN::SetEndMarker ( &pTail->m_uWordPos );
+			pTail--;
 		}
 	}
 }

+ 4 - 3
src/sphinxquery.cpp

@@ -767,14 +767,15 @@ int XQParser_t::GetToken ( YYSTYPE * lvalp )
 			&& ( *p=='\0' || sphIsSpace(*p) || IsSpecial(*p) ) )
 		{
 			// float as quorum argument has higher precedence than blended
-			bool bQuorumPercent = ( m_iQuorumQuote==m_iQuorumFSlash && m_iQuorumFSlash==m_iAtomPos && iDots==1 );
+			bool bQuorum = ( m_iQuorumQuote==m_iQuorumFSlash && m_iQuorumFSlash==m_iAtomPos );
+			bool bQuorumPercent = ( bQuorum && iDots==1 );
 
 			bool bTok = ( m_pTokenizer->GetToken()!=NULL );
-			if ( bTok && m_pTokenizer->TokenIsBlended() && !bQuorumPercent ) // number with blended should be tokenized as usual
+			if ( bTok && m_pTokenizer->TokenIsBlended() && !( bQuorum || bQuorumPercent ) ) // number with blended should be tokenized as usual
 			{
 				m_pTokenizer->SkipBlended();
 				m_pTokenizer->SetBufferPtr ( m_pLastTokenStart );
-			} else if ( bTok && m_pTokenizer->WasTokenSynonym() && !bQuorumPercent )
+			} else if ( bTok && m_pTokenizer->WasTokenSynonym() && !( bQuorum || bQuorumPercent ) )
 			{
 				m_pTokenizer->SetBufferPtr ( m_pLastTokenStart );
 			} else
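
The parser change widens the special case from float quorum arguments to any quorum argument. Since bQuorumPercent implies bQuorum, the condition !( bQuorum || bQuorumPercent ) reduces to !bQuorum, so an integer count such as "aaa bbb"/3 followed by a blended character now takes the quorum branch instead of being re-tokenized as a blended token. A runnable restatement of the decision, with hypothetical inputs:

#include <cstdio>

// Illustrative only: models the precedence decision in XQParser_t::GetToken().
static bool ReTokenizeAsBlended ( bool bTokenIsBlended, bool bQuorum, int iDots )
{
	bool bQuorumPercent = ( bQuorum && iDots==1 ); // float form, e.g. "a b"/0.5
	// before the fix only bQuorumPercent was checked, so the integer form
	// "a b"/3 fell through into the blended path
	return bTokenIsBlended && !( bQuorum || bQuorumPercent );
}

int main ()
{
	printf ( "int quorum: %d\n", ReTokenizeAsBlended ( true, true, 0 ) );  // 0 after the fix
	printf ( "no quorum:  %d\n", ReTokenizeAsBlended ( true, false, 0 ) ); // 1, unchanged
	return 0;
}
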

+ 6 - 0
src/sphinxrt.cpp

@@ -2824,6 +2824,7 @@ void RtIndex_t::Commit ( int * pDeleted )
 		pAcc->m_pIndex = NULL;
 		pAcc->m_dAccumRows.Resize ( 0 );
 		pAcc->m_dStrings.Resize ( 1 );
+		pAcc->m_dMvas.Resize ( 1 );
 		pAcc->m_dPerDocHitsCount.Resize ( 0 );
 		pAcc->ResetDict();
 		return;
@@ -2851,6 +2852,7 @@ void RtIndex_t::Commit ( int * pDeleted )
 	pAcc->m_dAccum.Resize ( 0 );
 	pAcc->m_dAccumRows.Resize ( 0 );
 	pAcc->m_dStrings.Resize ( 1 ); // handle dummy zero offset
+	pAcc->m_dMvas.Resize ( 1 );
 	pAcc->m_dPerDocHitsCount.Resize ( 0 );
 	pAcc->ResetDict();
 
@@ -3225,6 +3227,10 @@ void RtIndex_t::RollBack ()
 	// clean up parts we no longer need
 	pAcc->m_dAccum.Resize ( 0 );
 	pAcc->m_dAccumRows.Resize ( 0 );
+	pAcc->m_dStrings.Resize ( 1 ); // handle dummy zero offset
+	pAcc->m_dMvas.Resize ( 1 );
+	pAcc->m_dPerDocHitsCount.Resize ( 0 );
+	pAcc->ResetDict();
 
 	// finish cleaning up and release accumulator
 	pAcc->m_pIndex = NULL;

+ 25 - 10
src/sphinxsearch.cpp

@@ -1388,8 +1388,19 @@ static ExtNode_i * CreateMultiNode ( const XQNode_t * pQueryNode, const ISphQwor
 		CSphVector<ExtNode_i *> dNodes;
 		ARRAY_FOREACH ( i, pQueryNode->m_dChildren )
 		{
-			dNodes.Add ( ExtNode_i::Create ( pQueryNode->m_dChildren[i], tSetup ) );
-			assert ( dNodes.Last()->m_iAtomPos>=0 );
+			ExtNode_i * pTerm = ExtNode_i::Create ( pQueryNode->m_dChildren[i], tSetup );
+			assert ( !pTerm || pTerm->m_iAtomPos>=0 );
+			if ( pTerm )
+				dNodes.Add ( pTerm );
+		}
+
+		if ( dNodes.GetLength()<2 )
+		{
+			ARRAY_FOREACH ( i, dNodes )
+				SafeDelete ( dNodes[i] );
+			if ( tSetup.m_pWarning )
+				tSetup.m_pWarning->SetSprintf ( "can't create phrase node, hitlists unavailable (hitlists=%d, nodes=%d)", dNodes.GetLength(), pQueryNode->m_dChildren.GetLength() );
+			return NULL;
 		}
 
 		// FIXME! tricky combo again
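
With hitless words in play, ExtNode_i::Create() can legitimately return NULL for a child, so the multi-node constructor now skips NULLs and refuses to build a phrase or proximity node when fewer than two positional children survive, warning instead of dereferencing a bad pointer. The ZONESPAN and AND hunks below apply the same filtering, which is why their build loops now run to dTerms.GetLength() rather than iChildren. A minimal sketch of the NULL-tolerant collection, with Node and MakeChild() as stand-ins:

#include <cstdio>
#include <vector>

// Illustrative only; Node/MakeChild() stand in for ExtNode_i/ExtNode_i::Create().
struct Node { int m_iAtomPos; };

static Node * MakeChild ( bool bHitless )
{
	return bHitless ? NULL : new Node(); // hitless words yield no node
}

static bool BuildPhrase ( const std::vector<bool> & dHitless )
{
	std::vector<Node *> dNodes;
	for ( size_t i=0; i<dHitless.size(); i++ )
	{
		Node * pTerm = MakeChild ( dHitless[i] );
		if ( pTerm ) // skip NULLs instead of storing and later dereferencing them
			dNodes.push_back ( pTerm );
	}

	bool bOk = ( dNodes.size()>=2 ); // a phrase needs two or more hitlist-bearing nodes
	if ( !bOk )
		printf ( "warning: hitlists unavailable\n" );

	for ( size_t i=0; i<dNodes.size(); i++ )
		delete dNodes[i];
	return bOk;
}

int main ()
{
	printf ( "%d\n", (int)BuildPhrase ( std::vector<bool> ( 3, false ) ) ); // 1: all three survive
	return 0;
}
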
@@ -1476,7 +1487,7 @@ static ExtNode_i * CreateOrderNode ( const XQNode_t * pNode, const ISphQwordSetu
 	ARRAY_FOREACH ( i, pNode->m_dChildren )
 	{
 		ExtNode_i * pChild = ExtNode_i::Create ( pNode->m_dChildren[i], tSetup );
-		if ( pChild->GotHitless() )
+		if ( !pChild || pChild->GotHitless() )
 		{
 			if ( tSetup.m_pWarning )
 				tSetup.m_pWarning->SetSprintf ( "failed to create order node, hitlist unavailable" );
@@ -1929,13 +1940,15 @@ ExtNode_i * ExtNode_i::Create ( const XQNode_t * pNode, const ISphQwordSetup & t
 				for ( int i=0; i<iChildren; i++ )
 				{
 					const XQNode_t * pChild = pNode->m_dChildren[i];
-					dTerms.Add ( ExtNode_i::Create ( pChild, tSetup ) );
+					ExtNode_i * pTerm = ExtNode_i::Create ( pChild, tSetup );
+					if ( pTerm )
+						dTerms.Add ( pTerm );
 				}
 
 				dTerms.Sort ( ExtNodeTF_fn() );
 
 				ExtNode_i * pCur = dTerms[0];
-				for ( int i=1; i<iChildren; i++ )
+				for ( int i=1; i<dTerms.GetLength(); i++ )
 					pCur = new ExtAndZonespan_c ( pCur, dTerms[i], tSetup, pNode->m_dChildren[0] );
 
 // For zonespan we have also Extra data which is not (yet?) covered by common-node optimization.
@@ -1949,13 +1962,15 @@ ExtNode_i * ExtNode_i::Create ( const XQNode_t * pNode, const ISphQwordSetup & t
 				for ( int i=0; i<iChildren; i++ )
 				{
 					const XQNode_t * pChild = pNode->m_dChildren[i];
-					dTerms.Add ( ExtNode_i::Create ( pChild, tSetup ) );
+					ExtNode_i * pTerm = ExtNode_i::Create ( pChild, tSetup );
+					if ( pTerm )
+						dTerms.Add ( pTerm );
 				}
 
 				dTerms.Sort ( ExtNodeTF_fn() );
 
 				ExtNode_i * pCur = dTerms[0];
-				for ( int i=1; i<iChildren; i++ )
+				for ( int i=1; i<dTerms.GetLength(); i++ )
 					pCur = new ExtAnd_c ( pCur, dTerms[i], tSetup );
 
 				if ( pNode->GetCount() )
@@ -1993,7 +2008,7 @@ ExtNode_i * ExtNode_i::Create ( const XQNode_t * pNode, const ISphQwordSetup & t
 				default:					assert ( 0 && "internal error: unhandled op in ExtNode_i::Create()" ); break;
 			}
 		}
-		if ( pNode->GetCount() )
+		if ( pCur && pNode->GetCount() )
 			return tSetup.m_pNodeCache->CreateProxy ( pCur, pNode, tSetup );
 		return pCur;
 	}
@@ -5171,13 +5186,13 @@ int ExtUnit_c::FilterHits ( int iMyHit, DWORD uSentenceEnd, SphDocID_t uDocid, i
 			{
 				m_dMyHits[iMyHit++] = *m_pHit1++;
 				if ( m_pHit1->m_uDocid==DOCID_MAX )
-					m_pHit1 = m_pArg1->GetHitsChunk ( m_pDocs1, 0 );
+					m_pHit1 = m_pArg1->GetHitsChunk ( m_pDocs1, DOCID_MAX );
 
 			} else
 			{
 				m_dMyHits[iMyHit++] = *m_pHit2++;
 				if ( m_pHit2->m_uDocid==DOCID_MAX )
-					m_pHit2 = m_pArg2->GetHitsChunk ( m_pDocs2, 0 );
+					m_pHit2 = m_pArg2->GetHitsChunk ( m_pDocs2, DOCID_MAX );
 			}
 
 		} else
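
The FilterHits() change swaps the 0 passed to GetHitsChunk() for DOCID_MAX when refilling an exhausted hit chunk. Assuming the second argument is an upper docid bound for the chunk (the diff itself does not show the callee), a bound of 0 would make every refill come back empty and drop the remaining hits of the sentence, while DOCID_MAX lets the stream run to its natural end. A sketch under that assumption:

#include <cstdio>
#include <vector>

// Illustrative chunked hit reader; uMaxID models the bound handed to
// GetHitsChunk(). Its semantics here are assumed, not taken from Sphinx.
static const unsigned DOCID_MAX = ~0u;

struct HitSource
{
	std::vector<unsigned> m_dDocids;
	size_t m_iPos;

	explicit HitSource ( const std::vector<unsigned> & dDocids )
		: m_dDocids ( dDocids )
		, m_iPos ( 0 )
	{}

	// return the next docid not above uMaxID, or 0 when exhausted
	unsigned GetHitsChunk ( unsigned uMaxID )
	{
		if ( m_iPos<m_dDocids.size() && m_dDocids[m_iPos]<=uMaxID )
			return m_dDocids[m_iPos++];
		return 0;
	}
};

int main ()
{
	HitSource tSrc ( std::vector<unsigned> { 5, 9, 12 } );
	// with a bound of 0 the first refill already returns nothing and 9, 12 are lost
	while ( unsigned uDoc = tSrc.GetHitsChunk ( DOCID_MAX ) )
		printf ( "doc %u\n", uDoc );
	return 0;
}
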

File diff suppressed because it is too large
+ 0 - 0
test/test_025/model.bin


+ 1 - 0
test/test_025/s25.txt

@@ -0,0 +1 @@
+1 => 1

+ 42 - 33
test/test_025/test.xml

@@ -40,42 +40,51 @@ index test
       <Variant>	min_prefix_len = 5 </Variant>
     </Dynamic>
 }
+
+index quo
+{
+	source			= srctest
+	path			= <data_path/>/quo
+    charset_type 	= utf-8
+	exceptions		= <this_test/>/s25.txt
+}
 </config>
 
 <queries>
-<query>a</query>
-<query>bb</query>
-<query>ccc</query>
-<query>dddd</query>
-<query>eeeee</query>
-<query>ffffff</query>
-<query>ggggggg</query>
-<query>b*</query>
-<query>cc*</query>
-<query>ddd*</query>
-<query>eeee*</query>
-<query>fffff*</query>
-<query>gggggg*</query>
-<query mode="extended2">a</query>
-<query mode="extended2">bb</query>
-<query mode="extended2">ccc</query>
-<query mode="extended2">dddd</query>
-<query mode="extended2">eeeee</query>
-<query mode="extended2">ffffff</query>
-<query mode="extended2">ggggggg</query>
-<query mode="extended2">b*</query>
-<query mode="extended2">cc*</query>
-<query mode="extended2">ddd*</query>
-<query mode="extended2">eeee*</query>
-<query mode="extended2">fffff*</query>
-<query mode="extended2">gggggg*</query>
-<query mode="phrase">hello me world</query>
-<query mode="phrase">hello two world</query>
-<query mode="phrase">hello four world</query>
-<query mode="phrase">hello me* world</query>
-<query mode="phrase">hello two* world</query>
-<query mode="phrase">hello four* world</query>
-<query mode="phrase">4 you</query>
+<query index="test">a</query>
+<query index="test">bb</query>
+<query index="test">ccc</query>
+<query index="test">dddd</query>
+<query index="test">eeeee</query>
+<query index="test">ffffff</query>
+<query index="test">ggggggg</query>
+<query index="test">b*</query>
+<query index="test">cc*</query>
+<query index="test">ddd*</query>
+<query index="test">eeee*</query>
+<query index="test">fffff*</query>
+<query index="test">gggggg*</query>
+<query mode="extended2" index="test">a</query>
+<query mode="extended2" index="test">bb</query>
+<query mode="extended2" index="test">ccc</query>
+<query mode="extended2" index="test">dddd</query>
+<query mode="extended2" index="test">eeeee</query>
+<query mode="extended2" index="test">ffffff</query>
+<query mode="extended2" index="test">ggggggg</query>
+<query mode="extended2" index="test">b*</query>
+<query mode="extended2" index="test">cc*</query>
+<query mode="extended2" index="test">ddd*</query>
+<query mode="extended2" index="test">eeee*</query>
+<query mode="extended2" index="test">fffff*</query>
+<query mode="extended2" index="test">gggggg*</query>
+<query mode="phrase" index="test">hello me world</query>
+<query mode="phrase" index="test">hello two world</query>
+<query mode="phrase" index="test">hello four world</query>
+<query mode="phrase" index="test">hello me* world</query>
+<query mode="phrase" index="test">hello two* world</query>
+<query mode="phrase" index="test">hello four* world</query>
+<query mode="phrase" index="test">4 you</query>
+<query mode="extended2" index="quo"> "hello two world"/1 </query>
 </queries>
 
 <db_create>

File diff suppressed because it is too large
+ 0 - 0
test/test_063/model.bin


+ 0 - 5
test/test_063/test.xml

@@ -3,11 +3,6 @@
 
 <name>blend characters</name>
 
-<requires>
-	<variant_match />
-</requires>
-
-
 <config>
 indexer
 {

File diff suppressed because it is too large
+ 0 - 0
test/test_081/model.bin


+ 17 - 0
test/test_081/test.xml

@@ -42,6 +42,17 @@ index all
 
 	hitless_words = all
 }
+
+index some1
+{
+	source = test
+	path = <data_path/>/some1
+
+	hitless_words = <this_test/>/words1.txt <this_test/>/words2.txt
+	html_strip		= 1
+	index_sp		= 1
+	index_zones		= zone*
+}
 </config>
 
 <db_create>
@@ -121,6 +132,12 @@ insert into sph_test values
 	<query mode="extended2" index="none">basket$</query>
 	<query mode="extended2" index="some">basket$</query>
 	<query mode="extended2" index="all ">basket$</query>
+	
+	<!-- regressions hitless vs extended query operators -->
+	<query mode="extended2" index="some1">"and the tree" &lt;&lt; Woodman </query>
+	<query mode="extended2" index="some1">Tin Woodman "and the tree"</query>
+	<query mode="extended2" index="some1">ZONESPAN:(zoneC,zoneB) Tin Woodman "and the tree"</query>
+	<query mode="extended2" index="some1">Woodman NEAR/10 "and the tree"</query>
 </queries>
 
 </test>

File diff suppressed because it is too large
+ 0 - 0
test/test_101/model.bin


+ 15 - 2
test/test_101/test.xml

@@ -32,6 +32,14 @@ index test1
 	rt_field		= content
 }
 
+index test2
+{
+	type			= rt
+	path			= data/test2
+	rt_attr_uint	= idd
+	rt_field		= content
+}
+
 </Config>
 
 <SPHQueries>
@@ -61,6 +69,11 @@ index test1
 	<SPHinxQL>set autocommit=1</SPHinxQL>
 	<SPHinxQL>select * from test</SPHinxQL>
 	<SPHinxQL>select * from test1</SPHinxQL>
-</SPHQueries>
-
+	
+	<!-- regression failed insert got passed next time -->
+	<SPHinxQL>insert into test2 values (1, 'test', 11)</SPHinxQL>
+	<SPHinxQL>insert into test2 values (2, 'test of my things going well', 22), (1, 'test', 12)</SPHinxQL>
+	<SPHinxQL>insert into test2 values (3, 'test', 22), (3, 'test', 12), (3, 'test', 23)</SPHinxQL>
+	<SPHinxQL>select id from test2</SPHinxQL>
+</SPHQueries>	
 </Test>

File diff suppressed because it is too large
+ 0 - 0
test/test_148/model.bin


+ 44 - 0
test/test_148/test.xml

@@ -82,6 +82,45 @@ index fl
 	path			= <data_path/>/fl
 	docinfo			= extern
 }
+
+source src_blend
+{
+	type = mysql
+	<sql_settings/>
+	sql_query = SELECT 1 as id, 'dummy1 dummy1 dummy1 dummy1 X-COM' as title, 'dummy1 dummy1, dummy1, dummy1 dummy1 dummy1 valid dummy1 dummy1 dummy1 X-COM, dummy1' as body
+	sql_field_string	= title
+	sql_field_string	= body
+}
+
+index idx_blend_crc
+{
+	source			= src_blend
+	path			= <data_path/>/idx_blend_crc
+	docinfo			= extern
+	
+	morphology		= stem_enru
+	min_prefix_len	= 3
+	enable_star		= 1
+	index_exact_words	= 1
+	blend_mode		= trim_head, trim_tail, trim_both
+	blend_chars 	= +, %, -
+	dict = crc	
+}
+
+index idx_blend_kw
+{
+	source			= src_blend
+	path			= <data_path/>/idx_blend_kw
+	docinfo			= extern
+	
+	morphology		= stem_enru
+	min_prefix_len	= 3
+	enable_star		= 1
+	index_exact_words	= 1
+	blend_mode		= trim_head, trim_tail, trim_both
+	blend_chars 	= +, %, -
+	dict			= keywords
+}
 </config>
 
 <db_create>
@@ -128,6 +167,11 @@ CREATE TABLE start_end_table
 	<sphinxql>select * from fl where match (' (@title we are) (@body live there) from time ')</sphinxql>
 	<sphinxql>select * from fl where match (' are (@body we) from time ')</sphinxql>
 	<sphinxql>select * from fl where match (' are (@body we) from (@title time the) here ')</sphinxql>
+	
+	<!-- regression of field end marker vs blended -->
+	<sphinxql>select * from idx_blend_crc where match (' x-com NEAR/10 valid')</sphinxql>
+	<sphinxql>select * from idx_blend_kw where match (' x-com NEAR/10 valid ')</sphinxql>
+	<sphinxql>CALL SNIPPETS ( 'dummy1 dummy1 valid dummy1 dummy1 dummy1 X-COM, dummy1', 'idx_blend_kw', 'x-com NEAR/10 valid', 1 as query_mode )</sphinxql>
 </sphqueries>
 
 <custom_insert><![CDATA[

File diff suppressed because it is too large
+ 0 - 0
test/test_175/model.bin


+ 35 - 0
test/test_175/test.xml

@@ -202,6 +202,22 @@ index rt_auto_expand
 	expand_keywords = 1
 }
 
+source some1
+{
+	type			= mysql
+	<sql_settings/>
+	sql_query		= select * from test_table1
+	sql_attr_uint	= gid
+	sql_field_string = title
+}
+
+
+index some1
+{
+	source = some1
+	path = <data_path/>/some1
+	hitless_words = all
+}
 </config>
 
 <db_create>
@@ -373,6 +389,25 @@ foreach ( $res as $r )
 	$results[] = $r;
 }
 
+// regression hitless vs subtree cache
+$results[] = 'hitless vs subtree cache';
+$client->SetSortMode ( SPH_SORT_ATTR_ASC, 'title' );
+$client->AddQuery ( "(\"aaa bbb\") (\"aaa cc\")", "some1" );
+$client->AddQuery ( "(\"aaa bbb\") (\"aaa cc\")", "some1" );
+$client->AddQuery ( "(\"aaa bbb\") (\"aaa cc\")", "some1" );
+$res = $client->RunQueries();
+if ( !$res )
+{
+	$results[] = $client->GetLastError();
+} else
+{
+	foreach ( $res as $r )
+	{
+		unset ( $r["time"] );
+		$results[] = $r;
+	}
+}
+
 // regression RT dictionary lost words on merge with ID64
 $query = create_function ( '$q, $sock','
 	

Some files were not shown because too many files changed in this diff