1 /////////////////////////////////////////////////////////////////////////////
2 // Copyright (c) 2009-2014 Alan Wright. All rights reserved.
3 // Distributable under the terms of either the Apache License (Version 2.0)
4 // or the GNU Lesser General Public License.
5 /////////////////////////////////////////////////////////////////////////////
6
7 #include "TestInc.h"
8 #include "LuceneTestFixture.h"
9 #include "TestUtils.h"
10 #include "Document.h"
11 #include "Field.h"
12 #include "DateTools.h"
13 #include "FileReader.h"
14 #include "MockRAMDirectory.h"
15 #include "IndexWriter.h"
16 #include "StandardAnalyzer.h"
17 #include "WhitespaceAnalyzer.h"
18 #include "IndexReader.h"
19 #include "SegmentInfos.h"
20 #include "IndexCommit.h"
21 #include "FieldSortedTermVectorMapper.h"
22 #include "TermVectorEntryFreqSortedComparator.h"
23 #include "Term.h"
24 #include "TermDocs.h"
25 #include "SetBasedFieldSelector.h"
26 #include "FieldSelector.h"
27 #include "FSDirectory.h"
28 #include "IndexFileDeleter.h"
29 #include "KeepOnlyLastCommitDeletionPolicy.h"
30 #include "IndexSearcher.h"
31 #include "ScoreDoc.h"
32 #include "TopDocs.h"
33 #include "TermQuery.h"
34 #include "SegmentReader.h"
35 #include "FieldCache.h"
36 #include "ReadOnlyDirectoryReader.h"
37 #include "ReadOnlySegmentReader.h"
38 #include "FileUtils.h"
39
40 using namespace Lucene;
41
42 typedef LuceneTestFixture IndexReaderTest;
43
addDocumentWithFields(const IndexWriterPtr & writer)44 static void addDocumentWithFields(const IndexWriterPtr& writer) {
45 DocumentPtr doc = newLucene<Document>();
46 doc->add(newLucene<Field>(L"keyword", L"test1", Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
47 doc->add(newLucene<Field>(L"text", L"test1", Field::STORE_YES, Field::INDEX_ANALYZED));
48 doc->add(newLucene<Field>(L"unindexed", L"test1", Field::STORE_YES, Field::INDEX_NO));
49 doc->add(newLucene<Field>(L"unstored", L"test1", Field::STORE_NO, Field::INDEX_ANALYZED));
50 writer->addDocument(doc);
51 }
52
addDocumentWithDifferentFields(const IndexWriterPtr & writer)53 static void addDocumentWithDifferentFields(const IndexWriterPtr& writer) {
54 DocumentPtr doc = newLucene<Document>();
55 doc->add(newLucene<Field>(L"keyword2", L"test1", Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
56 doc->add(newLucene<Field>(L"text2", L"test1", Field::STORE_YES, Field::INDEX_ANALYZED));
57 doc->add(newLucene<Field>(L"unindexed2", L"test1", Field::STORE_YES, Field::INDEX_NO));
58 doc->add(newLucene<Field>(L"unstored2", L"test1", Field::STORE_NO, Field::INDEX_ANALYZED));
59 writer->addDocument(doc);
60 }
61
addDocumentWithTermVectorFields(const IndexWriterPtr & writer)62 static void addDocumentWithTermVectorFields(const IndexWriterPtr& writer) {
63 DocumentPtr doc = newLucene<Document>();
64 doc->add(newLucene<Field>(L"tvnot", L"tvnot", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_NO));
65 doc->add(newLucene<Field>(L"termvector", L"termvector", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_YES));
66 doc->add(newLucene<Field>(L"tvoffset", L"tvoffset", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_WITH_OFFSETS));
67 doc->add(newLucene<Field>(L"tvposition", L"tvposition", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_WITH_POSITIONS));
68 doc->add(newLucene<Field>(L"tvpositionoffset", L"tvpositionoffset", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_WITH_POSITIONS_OFFSETS));
69 writer->addDocument(doc);
70 }
71
addDoc(const IndexWriterPtr & writer,const String & value)72 static void addDoc(const IndexWriterPtr& writer, const String& value) {
73 DocumentPtr doc = newLucene<Document>();
74 doc->add(newLucene<Field>(L"content", value, Field::STORE_NO, Field::INDEX_ANALYZED));
75 writer->addDocument(doc);
76 }
77
checkTermDocsCount(const IndexReaderPtr & reader,const TermPtr & term,int32_t expected)78 static void checkTermDocsCount(const IndexReaderPtr& reader, const TermPtr& term, int32_t expected) {
79 TermDocsPtr tdocs;
80
81 LuceneException finally;
82 try {
83 tdocs = reader->termDocs(term);
84 EXPECT_TRUE(tdocs);
85 int32_t count = 0;
86 while (tdocs->next()) {
87 ++count;
88 }
89 EXPECT_EQ(expected, count);
90 } catch (LuceneException& e) {
91 finally = e;
92 }
93 if (tdocs) {
94 tdocs->close();
95 }
96 finally.throwException();
97 }
98
getDirectory()99 static DirectoryPtr getDirectory() {
100 return FSDirectory::open(FileUtils::joinPath(getTempDir(), L"testIndex"));
101 }
102
createDocument(const String & id)103 static DocumentPtr createDocument(const String& id) {
104 DocumentPtr doc = newLucene<Document>();
105 doc->add(newLucene<Field>(L"id", id, Field::STORE_YES, Field::INDEX_NOT_ANALYZED_NO_NORMS));
106 return doc;
107 }
108
// Verify that user data passed to IndexReader::flush() is persisted with the
// commit, that the commit matches the current segments file, and that
// IndexCommit equality/isOptimized() track subsequent index changes.
TEST_F(IndexReaderTest, testCommitUserData) {
    RAMDirectoryPtr d = newLucene<MockRAMDirectory>();

    MapStringString commitUserData = MapStringString::newInstance();
    commitUserData.put(L"foo", L"fighters");

    // set up writer
    IndexWriterPtr writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    writer->setMaxBufferedDocs(2); // small buffer -> many segments
    for (int32_t i = 0; i < 27; ++i) {
        addDocumentWithFields(writer);
    }
    writer->close();

    // delete one doc and commit with user data attached
    IndexReaderPtr r = IndexReader::open(d, false);
    r->deleteDocument(5);
    r->flush(commitUserData);
    r->close();

    SegmentInfosPtr sis = newLucene<SegmentInfos>();
    sis->read(d);
    IndexReaderPtr r2 = IndexReader::open(d, false);
    // NOTE(review): r is already closed here; getIndexCommit() after close is
    // relied upon (mirrors the upstream Java test) — confirm it stays legal.
    IndexCommitPtr c = r->getIndexCommit();
    MapStringString expectedData = c->getUserData();

    // the commit's user data must equal (both directions) what we flushed
    EXPECT_EQ(expectedData.size(), commitUserData.size());
    for (MapStringString::iterator expected = expectedData.begin(); expected != expectedData.end(); ++expected) {
        EXPECT_TRUE(commitUserData.find(expected->first) != commitUserData.end());
    }
    for (MapStringString::iterator commit = commitUserData.begin(); commit != commitUserData.end(); ++commit) {
        EXPECT_TRUE(expectedData.find(commit->first) != expectedData.end());
    }

    // the commit corresponds to the segments file currently on disk
    EXPECT_EQ(sis->getCurrentSegmentFileName(), c->getSegmentsFileName());

    EXPECT_TRUE(c->equals(r->getIndexCommit()));

    // Change the index
    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    writer->setMaxBufferedDocs(2);
    for (int32_t i = 0; i < 7; ++i) {
        addDocumentWithFields(writer);
    }
    writer->close();

    // a reopened reader must see a different (and not yet optimized) commit
    IndexReaderPtr r3 = r2->reopen();
    EXPECT_TRUE(!c->equals(r3->getIndexCommit()));
    EXPECT_TRUE(!r2->getIndexCommit()->isOptimized());
    r3->close();

    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    writer->optimize();
    writer->close();

    // after optimize the reopened reader's commit reports optimized
    r3 = r2->reopen();
    EXPECT_TRUE(r3->getIndexCommit()->isOptimized());
    r2->close();
    r3->close();
    d->close();
}
169
// Verify IndexReader::isCurrent(): true right after open, false once the index
// is modified or re-created behind the reader's back.
TEST_F(IndexReaderTest, testIsCurrent) {
    RAMDirectoryPtr d = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();
    // set up reader
    IndexReaderPtr reader = IndexReader::open(d, false);
    EXPECT_TRUE(reader->isCurrent());
    // modify index by adding another document
    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();
    EXPECT_TRUE(!reader->isCurrent());
    // re-create index (create=true wipes it) — reader is still stale
    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();
    EXPECT_TRUE(!reader->isCurrent());
    reader->close();
    d->close();
}
191
// Verify IndexReader::getFieldNames() for every FIELD_OPTION filter across an
// index whose documents (spread over several segments) define 13 distinct fields.
TEST_F(IndexReaderTest, testGetFieldNames) {
    RAMDirectoryPtr d = newLucene<MockRAMDirectory>();
    // set up writer
    IndexWriterPtr writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();
    // set up reader
    IndexReaderPtr reader = IndexReader::open(d, false);
    EXPECT_TRUE(reader->isCurrent());
    HashSet<String> fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_ALL);
    EXPECT_TRUE(fieldNames.contains(L"keyword"));
    EXPECT_TRUE(fieldNames.contains(L"text"));
    EXPECT_TRUE(fieldNames.contains(L"unindexed"));
    EXPECT_TRUE(fieldNames.contains(L"unstored"));
    reader->close();
    // add more documents
    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    // want to get some more segments here
    for (int32_t i = 0; i < 5 * writer->getMergeFactor(); ++i) {
        addDocumentWithFields(writer);
    }
    // new fields are in some different segments (we hope)
    for (int32_t i = 0; i < 5 * writer->getMergeFactor(); ++i) {
        addDocumentWithDifferentFields(writer);
    }
    // new termvector fields
    for (int32_t i = 0; i < 5 * writer->getMergeFactor(); ++i) {
        addDocumentWithTermVectorFields(writer);
    }

    writer->close();
    // verify fields again
    reader = IndexReader::open(d, false);
    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_ALL);
    EXPECT_EQ(13, fieldNames.size()); // the following fields
    EXPECT_TRUE(fieldNames.contains(L"keyword"));
    EXPECT_TRUE(fieldNames.contains(L"text"));
    EXPECT_TRUE(fieldNames.contains(L"unindexed"));
    EXPECT_TRUE(fieldNames.contains(L"unstored"));
    EXPECT_TRUE(fieldNames.contains(L"keyword2"));
    EXPECT_TRUE(fieldNames.contains(L"text2"));
    EXPECT_TRUE(fieldNames.contains(L"unindexed2"));
    EXPECT_TRUE(fieldNames.contains(L"unstored2"));
    EXPECT_TRUE(fieldNames.contains(L"tvnot"));
    EXPECT_TRUE(fieldNames.contains(L"termvector"));
    EXPECT_TRUE(fieldNames.contains(L"tvposition"));
    EXPECT_TRUE(fieldNames.contains(L"tvoffset"));
    EXPECT_TRUE(fieldNames.contains(L"tvpositionoffset"));

    // verify that only indexed fields were returned
    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_INDEXED);
    EXPECT_EQ(11, fieldNames.size()); // 6 original + the 5 termvector fields
    EXPECT_TRUE(fieldNames.contains(L"keyword"));
    EXPECT_TRUE(fieldNames.contains(L"text"));
    EXPECT_TRUE(fieldNames.contains(L"unstored"));
    EXPECT_TRUE(fieldNames.contains(L"keyword2"));
    EXPECT_TRUE(fieldNames.contains(L"text2"));
    EXPECT_TRUE(fieldNames.contains(L"unstored2"));
    EXPECT_TRUE(fieldNames.contains(L"tvnot"));
    EXPECT_TRUE(fieldNames.contains(L"termvector"));
    EXPECT_TRUE(fieldNames.contains(L"tvposition"));
    EXPECT_TRUE(fieldNames.contains(L"tvoffset"));
    EXPECT_TRUE(fieldNames.contains(L"tvpositionoffset"));

    // verify that only unindexed fields were returned
    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_UNINDEXED);
    EXPECT_EQ(2, fieldNames.size()); // the following fields
    EXPECT_TRUE(fieldNames.contains(L"unindexed"));
    EXPECT_TRUE(fieldNames.contains(L"unindexed2"));

    // verify index term vector fields
    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_TERMVECTOR);
    EXPECT_EQ(1, fieldNames.size()); // 1 field has term vector only
    EXPECT_TRUE(fieldNames.contains(L"termvector"));

    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_TERMVECTOR_WITH_POSITION);
    EXPECT_EQ(1, fieldNames.size()); // only "tvposition" stores positions without offsets
    EXPECT_TRUE(fieldNames.contains(L"tvposition"));

    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_TERMVECTOR_WITH_OFFSET);
    EXPECT_EQ(1, fieldNames.size()); // only "tvoffset" stores offsets without positions
    EXPECT_TRUE(fieldNames.contains(L"tvoffset"));

    fieldNames = reader->getFieldNames(IndexReader::FIELD_OPTION_TERMVECTOR_WITH_POSITION_OFFSET);
    EXPECT_EQ(1, fieldNames.size()); // only "tvpositionoffset" stores both
    EXPECT_TRUE(fieldNames.contains(L"tvpositionoffset"));
    reader->close();
    d->close();
}
281
// Verify that getTermFreqVector() with a FieldSortedTermVectorMapper collects
// vectors for exactly the four fields indexed with term vectors ("tvnot" is
// indexed with TERM_VECTOR_NO and must be absent).
TEST_F(IndexReaderTest, testTermVectors) {
    RAMDirectoryPtr d = newLucene<MockRAMDirectory>();
    // set up writer
    IndexWriterPtr writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    // want to get some more segments here
    // new termvector fields
    for (int32_t i = 0; i < 5 * writer->getMergeFactor(); ++i) {
        DocumentPtr doc = newLucene<Document>();
        doc->add(newLucene<Field>(L"tvnot", L"one two two three three three", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_NO));
        doc->add(newLucene<Field>(L"termvector", L"one two two three three three", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_YES));
        doc->add(newLucene<Field>(L"tvoffset", L"one two two three three three", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_WITH_OFFSETS));
        doc->add(newLucene<Field>(L"tvposition", L"one two two three three three", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_WITH_POSITIONS));
        doc->add(newLucene<Field>(L"tvpositionoffset", L"one two two three three three", Field::STORE_YES, Field::INDEX_ANALYZED, Field::TERM_VECTOR_WITH_POSITIONS_OFFSETS));
        writer->addDocument(doc);
    }
    writer->close();
    IndexReaderPtr reader = IndexReader::open(d, false);
    FieldSortedTermVectorMapperPtr mapper = newLucene<FieldSortedTermVectorMapper>(TermVectorEntryFreqSortedComparator::compare);
    reader->getTermFreqVector(0, mapper);
    MapStringCollectionTermVectorEntry map = mapper->getFieldToTerms();
    EXPECT_TRUE(map);
    EXPECT_EQ(map.size(), 4); // all fields except "tvnot" carry vectors
    Collection<TermVectorEntryPtr> set = map.get(L"termvector");
    for (Collection<TermVectorEntryPtr>::iterator entry = set.begin(); entry != set.end(); ++entry) {
        EXPECT_TRUE(*entry);
    }
    // release resources (previously leaked: every sibling test closes both)
    reader->close();
    d->close();
}
309
// Verify deleteDocuments(term): deletions hide docs from termDocs immediately,
// persist across reader reopen, but do not change docFreq (deleted docs are
// not purged until merge/optimize).
TEST_F(IndexReaderTest, testBasicDelete) {
    RAMDirectoryPtr dir = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");

    // add 100 documents with term : aaa
    for (int32_t i = 0; i < 100; ++i) {
        addDoc(writer, searchTerm->text());
    }
    writer->close();

    // open reader at this point - this should fix the view of the
    // index at the point of having 100 "aaa" documents and 0 "bbb"
    IndexReaderPtr reader = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader->docFreq(searchTerm));
    checkTermDocsCount(reader, searchTerm, 100);
    reader->close();

    // delete documents containing term: aaa
    int32_t deleted = 0;
    reader = IndexReader::open(dir, false);
    deleted = reader->deleteDocuments(searchTerm);
    EXPECT_EQ(100, deleted);
    EXPECT_EQ(100, reader->docFreq(searchTerm)); // docFreq still counts deleted docs
    checkTermDocsCount(reader, searchTerm, 0);

    // open a 2nd reader to make sure first reader can commit its changes (.del)
    // while second reader is open
    IndexReaderPtr reader2 = IndexReader::open(dir, false);
    reader->close();

    // create a new reader and re-test
    reader = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader->docFreq(searchTerm));
    checkTermDocsCount(reader, searchTerm, 0);
    reader->close();
    reader2->close();
    dir->close();
}
349
// Verify stored binary fields: round-trip of the raw bytes via document(),
// via a lazy FieldSelector, and again after optimize() merges segments.
TEST_F(IndexReaderTest, testBinaryFields) {
    DirectoryPtr dir = newLucene<RAMDirectory>();
    uint8_t _bin[10] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    ByteArray bin(ByteArray::newInstance(10));
    std::memcpy(bin.get(), _bin, 10);

    // fill the index with assorted documents so the binary doc is not alone
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthUNLIMITED);
    for (int32_t i = 0; i < 10; ++i) {
        addDoc(writer, L"document number " + StringUtils::toString(i + 1));
        addDocumentWithFields(writer);
        addDocumentWithDifferentFields(writer);
        addDocumentWithTermVectorFields(writer);
    }
    writer->close();
    // append one document carrying the binary field (last doc in the index)
    writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), false, IndexWriter::MaxFieldLengthLIMITED);
    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"bin1", bin, Field::STORE_YES));
    doc->add(newLucene<Field>(L"junk", L"junk text", Field::STORE_NO, Field::INDEX_ANALYZED));
    writer->addDocument(doc);
    writer->close();
    // eager load: bytes must match what was stored
    IndexReaderPtr reader = IndexReader::open(dir, false);
    doc = reader->document(reader->maxDoc() - 1);
    Collection<FieldPtr> fields = doc->getFields(L"bin1");
    EXPECT_TRUE(fields);
    EXPECT_EQ(1, fields.size());
    FieldPtr b1 = fields[0];
    EXPECT_TRUE(b1->isBinary());
    ByteArray data1 = b1->getBinaryValue();
    EXPECT_EQ(bin.size(), b1->getBinaryLength());
    EXPECT_TRUE(std::memcmp(bin.get(), data1.get() + b1->getBinaryOffset(), bin.size()) == 0);
    // lazy load via SetBasedFieldSelector: same bytes expected
    HashSet<String> lazyFields = HashSet<String>::newInstance();
    lazyFields.add(L"bin1");
    FieldSelectorPtr sel = newLucene<SetBasedFieldSelector>(HashSet<String>::newInstance(), lazyFields);
    doc = reader->document(reader->maxDoc() - 1, sel);
    Collection<FieldablePtr> fieldables = doc->getFieldables(L"bin1");
    EXPECT_TRUE(fieldables);
    EXPECT_EQ(1, fieldables.size());
    FieldablePtr fb1 = fieldables[0];
    EXPECT_TRUE(fb1->isBinary());
    EXPECT_EQ(bin.size(), fb1->getBinaryLength());
    data1 = fb1->getBinaryValue();
    EXPECT_EQ(bin.size(), fb1->getBinaryLength());
    EXPECT_TRUE(std::memcmp(bin.get(), data1.get() + fb1->getBinaryOffset(), bin.size()) == 0);
    reader->close();

    // force optimize
    writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), false, IndexWriter::MaxFieldLengthLIMITED);
    writer->optimize();
    writer->close();
    // after merging into one segment the bytes must still round-trip
    reader = IndexReader::open(dir, false);
    doc = reader->document(reader->maxDoc() - 1);
    fields = doc->getFields(L"bin1");
    EXPECT_TRUE(fields);
    EXPECT_EQ(1, fields.size());
    b1 = fields[0];
    EXPECT_TRUE(b1->isBinary());
    data1 = b1->getBinaryValue();
    EXPECT_EQ(bin.size(), b1->getBinaryLength());
    EXPECT_TRUE(std::memcmp(bin.get(), data1.get() + b1->getBinaryOffset(), bin.size()) == 0);
    reader->close();
}
411
/// Make sure attempts to make changes after reader is closed throws IOException
TEST_F(IndexReaderTest, testChangesAfterClose) {
    DirectoryPtr dir = newLucene<RAMDirectory>();
    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");

    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    // add 11 documents with term : aaa
    for (int32_t i = 0; i < 11; ++i) {
        addDoc(writer, searchTerm->text());
    }
    writer->close();

    IndexReaderPtr reader = IndexReader::open(dir, false);

    // Close reader
    reader->close();

    // Then, try to make changes; each mutating call must raise AlreadyClosed
    try {
        reader->deleteDocument(4);
    } catch (AlreadyClosedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::AlreadyClosed)(e));
    }
    try {
        reader->setNorm(5, L"aaa", 2.0);
    } catch (AlreadyClosedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::AlreadyClosed)(e));
    }
    try {
        reader->undeleteAll();
    } catch (AlreadyClosedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::AlreadyClosed)(e));
    }
}
446
/// Make sure we get lock obtain failed exception with 2 writers
TEST_F(IndexReaderTest, testLockObtainFailed) {
    DirectoryPtr dir = newLucene<RAMDirectory>();
    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");

    // writer stays open for the whole test, so it holds the write lock
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    // add 11 documents with term : aaa
    for (int32_t i = 0; i < 11; ++i) {
        addDoc(writer, searchTerm->text());
    }

    IndexReaderPtr reader = IndexReader::open(dir, false);

    // Try to make changes; each mutating call must fail to obtain the lock
    try {
        reader->deleteDocument(4);
    } catch (LockObtainFailedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::LockObtainFailed)(e));
    }
    try {
        reader->setNorm(5, L"aaa", 2.0);
    } catch (LockObtainFailedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::LockObtainFailed)(e));
    }
    try {
        reader->undeleteAll();
    } catch (LockObtainFailedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::LockObtainFailed)(e));
    }

    writer->close();
    reader->close();
}
480
// Verify write-lock lifecycle around setNorm(): acquiring it on first setNorm,
// releasing it on commit() and on close().
TEST_F(IndexReaderTest, testWritingNorms) {
    String indexDir(FileUtils::joinPath(getTempDir(), L"lucenetestnormwriter"));
    DirectoryPtr dir = FSDirectory::open(indexDir);
    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");

    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDoc(writer, searchTerm->text());
    writer->close();

    // now open reader & set norm for doc 0
    IndexReaderPtr reader = IndexReader::open(dir, false);
    reader->setNorm(0, L"content", 2.0);

    // we should be holding the write lock now
    EXPECT_TRUE(IndexWriter::isLocked(dir));

    reader->commit(MapStringString());

    // we should not be holding the write lock now
    EXPECT_TRUE(!IndexWriter::isLocked(dir));

    // open a 2nd reader
    IndexReaderPtr reader2 = IndexReader::open(dir, false);

    // set norm again for doc 0; re-acquires the write lock
    reader->setNorm(0, L"content", 3.0);
    EXPECT_TRUE(IndexWriter::isLocked(dir));

    reader->close();

    // we should not be holding the write lock now
    EXPECT_TRUE(!IndexWriter::isLocked(dir));

    reader2->close();
    dir->close();

    FileUtils::removeDirectory(indexDir);
}
519
/// Make sure you can set norms and commit, and there are no extra norms files left
TEST_F(IndexReaderTest, testWritingNormsNoReader) {
    RAMDirectoryPtr dir = newLucene<MockRAMDirectory>();

    // add 1 documents with term : aaa
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");

    // separate files (no compound file) so individual norm files are visible
    writer->setUseCompoundFile(false);
    addDoc(writer, searchTerm->text());
    writer->close();

    // now open reader & set norm for doc 0 (writes to _0_1.s0)
    IndexReaderPtr reader = IndexReader::open(dir, false);
    reader->setNorm(0, L"content", 2.0);
    reader->close();

    // now open reader again & set norm for doc 0 (writes to _0_2.s0)
    reader = IndexReader::open(dir, false);
    reader->setNorm(0, L"content", 2.0);
    reader->close();
    // the superseded norms generation must have been deleted
    EXPECT_TRUE(!dir->fileExists(L"_0_1.s0"));

    dir->close();
}
545
// Shared body for the reader/writer-conflict tests: a reader opened before a
// writer adds (and optionally optimizes) new segments becomes stale; deleting
// through it must raise StaleReaderException, and reopening must succeed.
static void deleteReaderWriterConflict(bool optimize) {
    DirectoryPtr dir = getDirectory();

    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");
    TermPtr searchTerm2 = newLucene<Term>(L"content", L"bbb");

    // add 100 documents with term : aaa
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    for (int32_t i = 0; i < 100; ++i) {
        addDoc(writer, searchTerm->text());
    }
    writer->close();

    // open reader at this point - this should fix the view of the index at the point of
    // having 100 "aaa" documents and 0 "bbb"
    IndexReaderPtr reader = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader->docFreq(searchTerm));
    EXPECT_EQ(0, reader->docFreq(searchTerm2));
    checkTermDocsCount(reader, searchTerm, 100);
    checkTermDocsCount(reader, searchTerm2, 0);

    // add 100 documents with term : bbb
    writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), false, IndexWriter::MaxFieldLengthLIMITED);
    for (int32_t i = 0; i < 100; ++i) {
        addDoc(writer, searchTerm2->text());
    }

    // request optimization
    // This causes a new segment to become current for all subsequent
    // searchers. Because of this, deletions made via a previously open
    // reader, which would be applied to that reader's segment, are lost
    // for subsequent searchers/readers
    if (optimize) {
        writer->optimize();
    }
    writer->close();

    // The reader should not see the new data
    EXPECT_EQ(100, reader->docFreq(searchTerm));
    EXPECT_EQ(0, reader->docFreq(searchTerm2));
    checkTermDocsCount(reader, searchTerm, 100);
    checkTermDocsCount(reader, searchTerm2, 0);

    // delete documents containing term: aaa
    // NOTE: the reader was created when only "aaa" documents were in
    int32_t deleted = 0;
    try {
        deleted = reader->deleteDocuments(searchTerm);
    } catch (StaleReaderException& e) {
        // expected: the reader's view is out of date
        EXPECT_TRUE(check_exception(LuceneException::StaleReader)(e));
    }

    // Re-open index reader and try again. This time it should see the new data.
    reader->close();
    reader = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader->docFreq(searchTerm));
    EXPECT_EQ(100, reader->docFreq(searchTerm2));
    checkTermDocsCount(reader, searchTerm, 100);
    checkTermDocsCount(reader, searchTerm2, 100);

    deleted = reader->deleteDocuments(searchTerm);
    EXPECT_EQ(100, deleted);
    EXPECT_EQ(100, reader->docFreq(searchTerm)); // docFreq still counts deleted docs
    EXPECT_EQ(100, reader->docFreq(searchTerm2));
    checkTermDocsCount(reader, searchTerm, 0);
    checkTermDocsCount(reader, searchTerm2, 100);
    reader->close();

    // create a new reader and re-test
    reader = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader->docFreq(searchTerm));
    EXPECT_EQ(100, reader->docFreq(searchTerm2));
    checkTermDocsCount(reader, searchTerm, 0);
    checkTermDocsCount(reader, searchTerm2, 100);
    reader->close();
}
622
// Reader/writer conflict without an intervening optimize().
TEST_F(IndexReaderTest, testDeleteReaderWriterConflictUnoptimized) {
    deleteReaderWriterConflict(false);
}
626
// Reader/writer conflict where the writer optimizes before closing.
TEST_F(IndexReaderTest, testDeleteReaderWriterConflictOptimized) {
    deleteReaderWriterConflict(true);
}
630
// Verify that both IndexWriter and IndexReader release every file handle on
// close: directory removal only succeeds if no file is still open.
TEST_F(IndexReaderTest, testFilesOpenClose) {
    // Create initial data set
    String dirFile = FileUtils::joinPath(getTempDir(), L"testIndex");
    DirectoryPtr dir = getDirectory();
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDoc(writer, L"test");
    writer->close();
    dir->close();

    // Try to erase the data - this ensures that the writer closed all files
    FileUtils::removeDirectory(dirFile);
    dir = getDirectory();

    // Now create the data set again, just as before
    writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDoc(writer, L"test");
    writer->close();
    dir->close();

    // Now open existing directory and test that reader closes all files
    dir = getDirectory();
    IndexReaderPtr reader1 = IndexReader::open(dir, false);
    reader1->close();
    dir->close();

    // The following will fail if reader did not close all files
    EXPECT_TRUE(FileUtils::removeDirectory(dirFile));
}
659
// Verify IndexReader::lastModified(): stable across repeated queries and
// non-decreasing after the index is rewritten. Runs twice — once against a
// RAM directory (i == 0) and once against the filesystem (i == 1).
TEST_F(IndexReaderTest, testLastModified) {
    String fileDir = FileUtils::joinPath(getTempDir(), L"testIndex");
    for (int32_t i = 0; i < 2; ++i) {
        LuceneException finally;
        try {
            DirectoryPtr dir = i == 0 ? newLucene<MockRAMDirectory>() : getDirectory();
            EXPECT_TRUE(!IndexReader::indexExists(dir));
            IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
            addDocumentWithFields(writer);
            EXPECT_TRUE(IndexWriter::isLocked(dir)); // writer open, so dir is locked
            writer->close();
            EXPECT_TRUE(IndexReader::indexExists(dir));
            IndexReaderPtr reader = IndexReader::open(dir, false);
            EXPECT_TRUE(!IndexWriter::isLocked(dir)); // reader only, no lock
            int64_t version = IndexReader::lastModified(dir);
            if (i == 1) {
                // on a real filesystem a second query must agree
                int64_t version2 = IndexReader::lastModified(dir);
                EXPECT_EQ(version, version2);
            }
            reader->close();

            // modify index and check version has been incremented
            // sleep so a coarse-grained filesystem timestamp can advance
            LuceneThread::threadSleep(1000);

            writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
            addDocumentWithFields(writer);
            writer->close();
            reader = IndexReader::open(dir, false);
            EXPECT_TRUE(version <= IndexReader::lastModified(dir));
            reader->close();
            dir->close();
        } catch (LuceneException& e) {
            finally = e;
        }
        if (i == 1) {
            // clean up the on-disk index even if the iteration failed
            EXPECT_TRUE(FileUtils::removeDirectory(fileDir));
        }
        finally.throwException();
    }
}
700
// Verify IndexReader::getCurrentVersion() strictly increases after the index
// is rewritten, and that indexExists()/isLocked() report the expected states.
TEST_F(IndexReaderTest, testVersion) {
    DirectoryPtr dir = newLucene<MockRAMDirectory>();
    EXPECT_TRUE(!IndexReader::indexExists(dir));
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    EXPECT_TRUE(IndexWriter::isLocked(dir)); // writer open, so dir is locked
    writer->close();
    EXPECT_TRUE(IndexReader::indexExists(dir));
    IndexReaderPtr reader = IndexReader::open(dir, false);
    EXPECT_TRUE(!IndexWriter::isLocked(dir)); // reader only, no lock
    int64_t version = IndexReader::getCurrentVersion(dir);
    reader->close();
    // modify index and check version has been incremented
    writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();
    reader = IndexReader::open(dir, false);
    EXPECT_TRUE(version < IndexReader::getCurrentVersion(dir));
    reader->close();
    dir->close();
}
722
// Verify the write lock: a reader cannot delete while a writer holds the lock,
// but succeeds after IndexWriter::unlock() forcibly releases it.
TEST_F(IndexReaderTest, testLock) {
    DirectoryPtr dir = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();
    // second writer keeps the write lock held for the rest of the test
    writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), false, IndexWriter::MaxFieldLengthLIMITED);
    IndexReaderPtr reader = IndexReader::open(dir, false);
    try {
        reader->deleteDocument(0);
    } catch (LockObtainFailedException& e) {
        EXPECT_TRUE(check_exception(LuceneException::LockObtainFailed)(e));
    }
    IndexWriter::unlock(dir); // this should not be done in the real world!
    reader->deleteDocument(0);
    reader->close();
    writer->close();
    dir->close();
}
741
// Verify undeleteAll() within a single reader session restores all deletions.
TEST_F(IndexReaderTest, testUndeleteAll) {
    DirectoryPtr dir = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    addDocumentWithFields(writer);
    writer->close();
    IndexReaderPtr reader = IndexReader::open(dir, false);
    reader->deleteDocument(0);
    reader->deleteDocument(1);
    reader->undeleteAll();
    reader->close();
    reader = IndexReader::open(dir, false);
    EXPECT_EQ(2, reader->numDocs()); // nothing has really been deleted thanks to undeleteAll()
    reader->close();
    dir->close();
}
758
/// Deletes are committed on close; a second reader can still undeleteAll()
/// and immediately see every document again.
TEST_F(IndexReaderTest, testUndeleteAllAfterClose) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(w);
    addDocumentWithFields(w);
    w->close();
    IndexReaderPtr r = IndexReader::open(directory, false);
    r->deleteDocument(0);
    r->deleteDocument(1);
    r->close(); // commits the two deletions
    r = IndexReader::open(directory, false);
    r->undeleteAll();
    EXPECT_EQ(2, r->numDocs()); // undeleteAll() restored the committed deletes
    r->close();
    directory->close();
}
775
/// Like testUndeleteAllAfterClose, but also verifies the undelete survives a
/// close/reopen cycle (i.e. it was durably committed).
TEST_F(IndexReaderTest, testUndeleteAllAfterCloseThenReopen) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(w);
    addDocumentWithFields(w);
    w->close();
    IndexReaderPtr r = IndexReader::open(directory, false);
    r->deleteDocument(0);
    r->deleteDocument(1);
    r->close(); // commits the deletions
    r = IndexReader::open(directory, false);
    r->undeleteAll();
    r->close(); // commits the undelete
    r = IndexReader::open(directory, false);
    EXPECT_EQ(2, r->numDocs()); // all documents visible after the reopen
    r->close();
    directory->close();
}
794
/// Exercises the delete conflict between two IndexReaders open on the same index:
/// deletes committed via the second reader make the first reader stale, so a later
/// delete attempt through the stale reader must throw StaleReaderException.
/// @param optimize whether to collapse the index to a single segment before testing.
static void deleteReaderReaderConflict(bool optimize) {
    DirectoryPtr dir = getDirectory();

    TermPtr searchTerm1 = newLucene<Term>(L"content", L"aaa");
    TermPtr searchTerm2 = newLucene<Term>(L"content", L"bbb");
    TermPtr searchTerm3 = newLucene<Term>(L"content", L"ccc");

    // add 100 documents with term : aaa
    // add 100 documents with term : bbb
    // add 100 documents with term : ccc
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    for (int32_t i = 0; i < 100; ++i) {
        addDoc(writer, searchTerm1->text());
        addDoc(writer, searchTerm2->text());
        addDoc(writer, searchTerm3->text());
    }
    if (optimize) {
        writer->optimize();
    }
    writer->close();

    // open two readers
    // both readers get segment info as exists at this time
    IndexReaderPtr reader1 = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader1->docFreq(searchTerm1));
    EXPECT_EQ(100, reader1->docFreq(searchTerm2));
    EXPECT_EQ(100, reader1->docFreq(searchTerm3));
    checkTermDocsCount(reader1, searchTerm1, 100);
    checkTermDocsCount(reader1, searchTerm2, 100);
    checkTermDocsCount(reader1, searchTerm3, 100);

    IndexReaderPtr reader2 = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader2->docFreq(searchTerm1));
    EXPECT_EQ(100, reader2->docFreq(searchTerm2));
    EXPECT_EQ(100, reader2->docFreq(searchTerm3));
    checkTermDocsCount(reader2, searchTerm1, 100);
    checkTermDocsCount(reader2, searchTerm2, 100);
    checkTermDocsCount(reader2, searchTerm3, 100);

    // delete docs from reader 2 and close it
    // delete documents containing term: aaa
    // when the reader is closed, the segment info is updated and
    // the first reader is now stale
    reader2->deleteDocuments(searchTerm1);
    // docFreq still reports 100: deletes hide docs from TermDocs enumeration
    // but do not update term statistics until the segments are merged
    EXPECT_EQ(100, reader2->docFreq(searchTerm1));
    EXPECT_EQ(100, reader2->docFreq(searchTerm2));
    EXPECT_EQ(100, reader2->docFreq(searchTerm3));
    checkTermDocsCount(reader2, searchTerm1, 0);
    checkTermDocsCount(reader2, searchTerm2, 100);
    checkTermDocsCount(reader2, searchTerm3, 100);
    reader2->close();

    // Make sure reader 1 is unchanged since it was open earlier
    EXPECT_EQ(100, reader1->docFreq(searchTerm1));
    EXPECT_EQ(100, reader1->docFreq(searchTerm2));
    EXPECT_EQ(100, reader1->docFreq(searchTerm3));
    checkTermDocsCount(reader1, searchTerm1, 100);
    checkTermDocsCount(reader1, searchTerm2, 100);
    checkTermDocsCount(reader1, searchTerm3, 100);

    // attempt to delete from stale reader
    // delete documents containing term: bbb
    try {
        reader1->deleteDocuments(searchTerm2);
    } catch (StaleReaderException& e) {
        EXPECT_TRUE(check_exception(LuceneException::StaleReader)(e));
    }

    // recreate reader and try again
    reader1->close();
    reader1 = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader1->docFreq(searchTerm1));
    EXPECT_EQ(100, reader1->docFreq(searchTerm2));
    EXPECT_EQ(100, reader1->docFreq(searchTerm3));
    // aaa docs were deleted by reader2 and the fresh reader sees that
    checkTermDocsCount(reader1, searchTerm1, 0);
    checkTermDocsCount(reader1, searchTerm2, 100);
    checkTermDocsCount(reader1, searchTerm3, 100);

    reader1->deleteDocuments(searchTerm2);
    EXPECT_EQ(100, reader1->docFreq(searchTerm1));
    EXPECT_EQ(100, reader1->docFreq(searchTerm2));
    EXPECT_EQ(100, reader1->docFreq(searchTerm3));
    checkTermDocsCount(reader1, searchTerm1, 0);
    checkTermDocsCount(reader1, searchTerm2, 0);
    checkTermDocsCount(reader1, searchTerm3, 100);
    reader1->close();

    // Open another reader to confirm that everything is deleted
    reader2 = IndexReader::open(dir, false);
    EXPECT_EQ(100, reader2->docFreq(searchTerm1));
    EXPECT_EQ(100, reader2->docFreq(searchTerm2));
    EXPECT_EQ(100, reader2->docFreq(searchTerm3));
    checkTermDocsCount(reader2, searchTerm1, 0);
    checkTermDocsCount(reader2, searchTerm2, 0);
    checkTermDocsCount(reader2, searchTerm3, 100);
    reader2->close();

    dir->close();
}
894
/// Runs the reader-vs-reader delete conflict scenario on a multi-segment (unoptimized) index.
TEST_F(IndexReaderTest, testDeleteReaderReaderConflictUnoptimized) {
    deleteReaderReaderConflict(false);
}
898
/// Runs the reader-vs-reader delete conflict scenario on a single-segment (optimized) index.
TEST_F(IndexReaderTest, testDeleteReaderReaderConflictOptimized) {
    deleteReaderReaderConflict(true);
}
902
/// Make sure if reader tries to commit but hits disk full that reader remains consistent and usable.
/// Strategy: replay the same delete/setNorm workload against copies of a starting index with
/// progressively more free space until a commit finally succeeds, checking transactional
/// semantics (all-or-nothing) after every attempt.
TEST_F(IndexReaderTest, testDiskFull) {
    TermPtr searchTerm = newLucene<Term>(L"content", L"aaa");
    int32_t START_COUNT = 157; // docs matching 'aaa' before any deletes
    int32_t END_COUNT = 144;   // docs remaining after the 13 deletes succeed

    // First build up a starting index
    RAMDirectoryPtr startDir = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(startDir, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    for (int32_t i = 0; i < 157; ++i) {
        DocumentPtr doc = newLucene<Document>();
        doc->add(newLucene<Field>(L"id", StringUtils::toString(i), Field::STORE_YES, Field::INDEX_NOT_ANALYZED));
        doc->add(newLucene<Field>(L"content", L"aaa " + StringUtils::toString(i), Field::STORE_NO, Field::INDEX_ANALYZED));
        writer->addDocument(doc);
    }
    writer->close();

    // Start just slightly above the index size so the first attempts hit disk full
    int64_t diskUsage = startDir->sizeInBytes();
    int64_t diskFree = diskUsage + 100;

    LuceneException err;

    bool done = false;

    // Iterate with ever increasing free disk space
    while (!done) {
        // Work on a fresh copy of the starting index each round
        MockRAMDirectoryPtr dir = newLucene<MockRAMDirectory>(startDir);

        // If IndexReader hits disk full, it can write to the same files again.
        dir->setPreventDoubleWrite(false);

        IndexReaderPtr reader = IndexReader::open(dir, false);

        // For each disk size, first try to commit against dir that will hit random IOExceptions and
        // disk full; after, give it infinite disk space and turn off random IOExceptions and
        // retry with same reader
        bool success = false;

        for (int32_t x = 0; x < 2; ++x) {
            double rate = 0.05;
            double diskRatio = ((double)diskFree) / (double)diskUsage;
            int64_t thisDiskFree = 0;
            String testName;

            if (x == 0) {
                // pass 0: constrained disk plus random IOExceptions; the more
                // headroom we have, the lower the injected failure rate
                thisDiskFree = diskFree;
                if (diskRatio >= 2.0) {
                    rate /= 2;
                }
                if (diskRatio >= 4.0) {
                    rate /= 2;
                }
                if (diskRatio >= 6.0) {
                    rate = 0.0;
                }
                testName = L"disk full during reader.close() @ " + StringUtils::toString(thisDiskFree) + L" bytes";
            } else {
                // pass 1: unlimited disk, no fault injection - must succeed
                thisDiskFree = 0;
                rate = 0.0;
                testName = L"reader re-use after disk full";
            }

            dir->setMaxSizeInBytes(thisDiskFree);
            dir->setRandomIOExceptionRate(rate, diskFree);

            try {
                if (x == 0) {
                    // delete every 12th doc and touch its norm
                    // NOTE(review): setNorm targets field "contents" while the indexed
                    // field is "content" - mirrors the upstream test; confirm intentional
                    int32_t docId = 12;
                    for (int32_t i = 0; i < 13; ++i) {
                        reader->deleteDocument(docId);
                        reader->setNorm(docId, L"contents", 2.0);
                        docId += 12;
                    }
                }
                reader->close(); // the commit that may hit disk full
                success = true;
                if (x == 0) {
                    done = true; // constrained pass succeeded: stop growing disk
                }
            } catch (IOException& e) {
                err = e;
                if (x == 1) {
                    // with unlimited disk the retry must never fail
                    FAIL() << testName << " hit IOException after disk space was freed up";
                }
            }

            // Whether we succeeded or failed, check that all un-referenced files were in fact deleted (ie,
            // we did not create garbage). Just create a new IndexFileDeleter, have it delete unreferenced
            // files, then verify that in fact no files were deleted
            HashSet<String> _startFiles = dir->listAll();
            SegmentInfosPtr infos = newLucene<SegmentInfos>();
            infos->read(dir);
            IndexFileDeleterPtr deleter = newLucene<IndexFileDeleter>(dir, newLucene<KeepOnlyLastCommitDeletionPolicy>(), infos, InfoStreamPtr(), DocumentsWriterPtr(), HashSet<String>());
            HashSet<String> _endFiles = dir->listAll();

            Collection<String> startFiles = Collection<String>::newInstance(_startFiles.begin(), _startFiles.end());
            Collection<String> endFiles = Collection<String>::newInstance(_endFiles.begin(), _endFiles.end());

            std::sort(startFiles.begin(), startFiles.end());
            std::sort(endFiles.begin(), endFiles.end());

            if (!startFiles.equals(endFiles)) {
                String successStr = success ? L"success" : L"IOException";
                FAIL() << "reader.close() failed to delete unreferenced files after " << successStr << " (" << diskFree << " bytes)";
            }

            // Finally, verify index is not corrupt, and, if we succeeded, we see all docs changed, and if
            // we failed, we see either all docs or no docs changed (transactional semantics)
            IndexReaderPtr newReader;
            EXPECT_NO_THROW(newReader = IndexReader::open(dir, false));

            IndexSearcherPtr searcher = newLucene<IndexSearcher>(newReader);
            Collection<ScoreDocPtr> hits;
            EXPECT_NO_THROW(hits = searcher->search(newLucene<TermQuery>(searchTerm), FilterPtr(), 1000)->scoreDocs);
            int32_t result2 = hits.size();
            if (success) {
                if (result2 != END_COUNT) {
                    FAIL() << testName << ": method did not throw exception but hits.size() for search on term 'aaa' is " << result2 << " instead of expected " << END_COUNT;
                }
            } else {
                // On hitting exception we still may have added all docs
                if (result2 != START_COUNT && result2 != END_COUNT) {
                    FAIL() << testName << ": method did throw exception but hits.size() for search on term 'aaa' is " << result2 << " instead of expected " << END_COUNT;
                }
            }

            searcher->close();
            newReader->close();

            if (result2 == END_COUNT) {
                break; // all deletes committed; skip the unlimited-disk retry
            }
        }

        dir->close();

        // Try again with 10 more bytes of free space
        diskFree += 10;
    }

    startDir->close();
}
1045
/// Deleting a docId just past maxDoc must throw, and must not corrupt the
/// deletion BitVector for a later optimize().
TEST_F(IndexReaderTest, testDocsOutOfOrder) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    for (int32_t docNum = 0; docNum < 11; ++docNum) {
        addDoc(w, L"aaa");
    }
    w->close();
    IndexReaderPtr r = IndexReader::open(directory, false);

    // docId 11 is invalid (maxDoc == 11) yet falls within the final byte of the BitVector
    try {
        r->deleteDocument(11);
    } catch (IndexOutOfBoundsException& e) {
        EXPECT_TRUE(check_exception(LuceneException::IndexOutOfBounds)(e));
    }

    r->close();

    w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), false, IndexWriter::MaxFieldLengthLIMITED);

    // A new segment must be written to force a merge with the old one
    for (int32_t docNum = 0; docNum < 11; ++docNum) {
        addDoc(w, L"aaa");
    }

    EXPECT_NO_THROW(w->optimize());
    w->close();
    directory->close();
}
1075
/// A modification that throws must still release the write lock the reader
/// acquired; verified for both deleteDocument() and setNorm().
TEST_F(IndexReaderTest, testExceptionReleaseWriteLock) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), true, IndexWriter::MaxFieldLengthLIMITED);
    addDoc(w, L"aaa");
    w->close();

    IndexReaderPtr r = IndexReader::open(directory, false);

    try {
        r->deleteDocument(1); // out of bounds: the index holds a single document
    } catch (IndexOutOfBoundsException& e) {
        EXPECT_TRUE(check_exception(LuceneException::IndexOutOfBounds)(e));
    }

    r->close();

    EXPECT_TRUE(!IndexWriter::isLocked(directory)); // lock released despite the failure

    r = IndexReader::open(directory, false);

    try {
        r->setNorm(1, L"content", 2.0); // same out-of-bounds docId
    } catch (IndexOutOfBoundsException& e) {
        EXPECT_TRUE(check_exception(LuceneException::IndexOutOfBounds)(e));
    }

    r->close();

    EXPECT_TRUE(!IndexWriter::isLocked(directory));

    directory->close();
}
1108
/// Opening a reader over a directory with no index must raise
/// NoSuchDirectoryException, both before and after the path is removed on disk.
TEST_F(IndexReaderTest, testOpenReaderAfterDelete) {
    String indexPath(FileUtils::joinPath(getTempDir(), L"deletetest"));
    DirectoryPtr fsDir = FSDirectory::open(indexPath);
    try {
        IndexReader::open(fsDir, false);
    } catch (NoSuchDirectoryException& e) {
        EXPECT_TRUE(check_exception(LuceneException::NoSuchDirectory)(e));
    }

    FileUtils::removeDirectory(indexPath);

    // the directory is gone from disk now; the error must be the same
    try {
        IndexReader::open(fsDir, false);
    } catch (NoSuchDirectoryException& e) {
        EXPECT_TRUE(check_exception(LuceneException::NoSuchDirectory)(e));
    }

    fsDir->close();
}
1128
/// getIndexCommit() must track the current segments file, change when the
/// index changes, and report isOptimized() only after an optimize().
TEST_F(IndexReaderTest, testGetIndexCommit) {
    RAMDirectoryPtr directory = newLucene<MockRAMDirectory>();
    // build a multi-segment index
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    w->setMaxBufferedDocs(2);
    for (int32_t docNum = 0; docNum < 27; ++docNum) {
        addDocumentWithFields(w);
    }
    w->close();

    SegmentInfosPtr segmentInfos = newLucene<SegmentInfos>();
    segmentInfos->read(directory);
    IndexReaderPtr reader = IndexReader::open(directory, false);
    IndexCommitPtr commit = reader->getIndexCommit();

    EXPECT_EQ(segmentInfos->getCurrentSegmentFileName(), commit->getSegmentsFileName());

    EXPECT_TRUE(commit->equals(reader->getIndexCommit()));

    // Change the index
    w = newLucene<IndexWriter>(directory, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    w->setMaxBufferedDocs(2);
    for (int32_t docNum = 0; docNum < 7; ++docNum) {
        addDocumentWithFields(w);
    }
    w->close();

    IndexReaderPtr reopened = reader->reopen();
    EXPECT_TRUE(!commit->equals(reopened->getIndexCommit())); // new commit point
    EXPECT_TRUE(!reopened->getIndexCommit()->isOptimized());  // still multi-segment
    reopened->close();

    w = newLucene<IndexWriter>(directory, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    w->optimize();
    w->close();

    reopened = reader->reopen();
    EXPECT_TRUE(reopened->getIndexCommit()->isOptimized()); // single segment now

    reader->close();
    reopened->close();
    directory->close();
}
1172
/// A read-only reader (and every reader reopened from it) must reject
/// deleteDocument() with UnsupportedOperationException and never take the
/// write lock.
TEST_F(IndexReaderTest, testReadOnly) {
    RAMDirectoryPtr d = newLucene<MockRAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), true, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->commit();
    addDocumentWithFields(writer);
    writer->close();

    IndexReaderPtr r = IndexReader::open(d, true); // read-only
    try {
        r->deleteDocument(0);
    } catch (UnsupportedOperationException& e) {
        EXPECT_TRUE(check_exception(LuceneException::UnsupportedOperation)(e));
    }

    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    addDocumentWithFields(writer);
    writer->close();

    // Make sure reopen is still readonly
    IndexReaderPtr r2 = r->reopen();
    r->close();

    EXPECT_NE(r, r2);
    try {
        r2->deleteDocument(0);
    } catch (UnsupportedOperationException& e) {
        EXPECT_TRUE(check_exception(LuceneException::UnsupportedOperation)(e));
    }

    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    writer->optimize();
    writer->close();

    // Make sure reopen to a single segment is still readonly
    IndexReaderPtr r3 = r2->reopen();
    r2->close();

    // Bug fix: this previously re-compared r and r2; the reopen after
    // optimize() must produce a reader distinct from r2.
    EXPECT_NE(r3, r2);
    try {
        r3->deleteDocument(0);
    } catch (UnsupportedOperationException& e) {
        EXPECT_TRUE(check_exception(LuceneException::UnsupportedOperation)(e));
    }

    // Make sure write lock isn't held
    writer = newLucene<IndexWriter>(d, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), false, IndexWriter::MaxFieldLengthLIMITED);
    writer->close();

    r3->close();
    d->close(); // was leaked; every sibling test closes its directory
}
1224
/// Smoke test: deletes interleaved with flush() must leave the index openable.
TEST_F(IndexReaderTest, testIndexReader) {
    RAMDirectoryPtr dir = newLucene<RAMDirectory>();
    IndexWriterPtr writer = newLucene<IndexWriter>(dir, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), IndexWriter::MaxFieldLengthUNLIMITED);
    writer->addDocument(createDocument(L"a"));
    writer->addDocument(createDocument(L"b"));
    writer->addDocument(createDocument(L"c"));
    writer->close();
    IndexReaderPtr reader = IndexReader::open(dir, false);
    reader->deleteDocuments(newLucene<Term>(L"id", L"a"));
    reader->flush(); // commit the first delete before issuing the second
    reader->deleteDocuments(newLucene<Term>(L"id", L"b"));
    reader->close();
    IndexReader::open(dir, true)->close(); // index must still open cleanly
    dir->close(); // was leaked; sibling tests (e.g. testIndexReaderUnDeleteAll) close their directory
}
1239
/// undeleteAll() after a flushed delete plus a pending delete must leave the
/// index consistent and reopenable.
TEST_F(IndexReaderTest, testIndexReaderUnDeleteAll) {
    MockRAMDirectoryPtr directory = newLucene<MockRAMDirectory>();
    directory->setPreventDoubleWrite(false); // undelete rewrites previously written files
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), IndexWriter::MaxFieldLengthUNLIMITED);
    w->addDocument(createDocument(L"a"));
    w->addDocument(createDocument(L"b"));
    w->addDocument(createDocument(L"c"));
    w->close();
    IndexReaderPtr r = IndexReader::open(directory, false);
    r->deleteDocuments(newLucene<Term>(L"id", L"a"));
    r->flush(); // commit the first delete
    r->deleteDocuments(newLucene<Term>(L"id", L"b"));
    r->undeleteAll(); // rolls back both deletes
    r->deleteDocuments(newLucene<Term>(L"id", L"b"));
    r->close();
    IndexReader::open(directory, true)->close();
    directory->close();
}
1258
1259 /// Make sure on attempting to open an IndexReader on a non-existent directory, you get a good exception
/// Make sure on attempting to open an IndexReader on a non-existent directory,
/// you get a good exception (NoSuchDirectoryException).
TEST_F(IndexReaderTest, testNoDir) {
    String missingPath(FileUtils::joinPath(getTempDir(), L"doesnotexist"));
    DirectoryPtr fsDir = FSDirectory::open(missingPath);
    try {
        IndexReader::open(fsDir, true);
    } catch (NoSuchDirectoryException& e) {
        EXPECT_TRUE(check_exception(LuceneException::NoSuchDirectory)(e));
    }
    fsDir->close();
}
1270
/// No commit point may list the same file name twice.
TEST_F(IndexReaderTest, testNoDupCommitFileNames) {
    MockRAMDirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<StandardAnalyzer>(LuceneVersion::LUCENE_CURRENT), IndexWriter::MaxFieldLengthLIMITED);
    w->setMaxBufferedDocs(2); // force multiple flushes / commits
    w->addDocument(createDocument(L"a"));
    w->addDocument(createDocument(L"a"));
    w->addDocument(createDocument(L"a"));
    w->close();

    Collection<IndexCommitPtr> allCommits = IndexReader::listCommits(directory);
    for (Collection<IndexCommitPtr>::iterator it = allCommits.begin(); it != allCommits.end(); ++it) {
        HashSet<String> names = (*it)->getFileNames();
        HashSet<String> alreadySeen = HashSet<String>::newInstance();
        for (HashSet<String>::iterator name = names.begin(); name != names.end(); ++name) {
            EXPECT_TRUE(!alreadySeen.contains(*name));
            alreadySeen.add(*name);
        }
    }

    directory->close();
}
1292
1293 /// Ensure that on a cloned reader, segments reuse the doc values arrays in FieldCache
/// Ensure that on a cloned reader, segments reuse the doc values arrays in FieldCache
TEST_F(IndexReaderTest, testFieldCacheReuseAfterClone) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);
    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"number", L"17", Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
    w->addDocument(doc);
    w->close();

    // Open the single segment directly and populate the FieldCache
    IndexReaderPtr origReader = SegmentReader::getOnlySegmentReader(directory);
    Collection<int32_t> origInts = FieldCache::DEFAULT()->getInts(origReader, L"number");
    EXPECT_EQ(1, origInts.size());
    EXPECT_EQ(17, origInts[0]);

    // Clone the reader and fetch the same cache entry through the clone
    IndexReaderPtr clonedReader = boost::dynamic_pointer_cast<IndexReader>(origReader->clone());
    origReader->close();
    EXPECT_NE(clonedReader, origReader);
    Collection<int32_t> clonedInts = FieldCache::DEFAULT()->getInts(clonedReader, L"number");
    clonedReader->close();

    EXPECT_EQ(1, clonedInts.size());
    EXPECT_EQ(17, clonedInts[0]);
    EXPECT_TRUE(origInts.equals(clonedInts)); // same underlying array reused

    directory->close();
}
1321
1322 /// Ensure that on a reopened reader, that any shared segments reuse the doc values arrays in FieldCache
/// Ensure that on a reopened reader, any shared segments reuse the doc values arrays in FieldCache
TEST_F(IndexReaderTest, testFieldCacheReuseAfterReopen) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);
    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"number", L"17", Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
    w->addDocument(doc);
    w->commit();

    // Open the first reader and warm the FieldCache for its only segment
    IndexReaderPtr topReader = IndexReader::open(directory, false);
    IndexReaderPtr segReader = SegmentReader::getOnlySegmentReader(topReader);
    Collection<int32_t> firstInts = FieldCache::DEFAULT()->getInts(segReader, L"number");
    EXPECT_EQ(1, firstInts.size());
    EXPECT_EQ(17, firstInts[0]);

    // Commit a second segment so reopen has work to do
    w->addDocument(doc);
    w->commit();

    // Reopen: the original segment is shared with the new reader
    IndexReaderPtr reopened = topReader->reopen();
    topReader->close();
    IndexReaderPtr sharedSub = reopened->getSequentialSubReaders()[0];
    Collection<int32_t> secondInts = FieldCache::DEFAULT()->getInts(sharedSub, L"number");
    reopened->close();
    EXPECT_TRUE(firstInts.equals(secondInts)); // cache entry reused for the shared segment

    directory->close();
}
1352
1353 /// Make sure all SegmentReaders are new when reopen switches readOnly
/// Make sure all SegmentReaders are new when reopen switches readOnly
TEST_F(IndexReaderTest, testReopenChangeReadonly) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthLIMITED);
    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"number", L"17", Field::STORE_NO, Field::INDEX_NOT_ANALYZED));
    w->addDocument(doc);
    w->commit();

    // Open a writable reader and warm the FieldCache
    IndexReaderPtr reader = IndexReader::open(directory, false);
    EXPECT_TRUE(boost::dynamic_pointer_cast<DirectoryReader>(reader));
    IndexReaderPtr onlySeg = SegmentReader::getOnlySegmentReader(reader);
    Collection<int32_t> vals = FieldCache::DEFAULT()->getInts(onlySeg, L"number");
    EXPECT_EQ(1, vals.size());
    EXPECT_EQ(17, vals[0]);

    // Reopen to readonly with no changes
    IndexReaderPtr readOnlyReader = reader->reopen(true);
    EXPECT_TRUE(boost::dynamic_pointer_cast<ReadOnlyDirectoryReader>(readOnlyReader));
    readOnlyReader->close();

    // Commit a second segment
    w->addDocument(doc);
    w->commit();

    // Reopen to readonly now that the index has changed
    IndexReaderPtr reopened = reader->reopen(true);
    reader->close();
    EXPECT_TRUE(boost::dynamic_pointer_cast<ReadOnlyDirectoryReader>(reopened));
    Collection<IndexReaderPtr> subReaders = reopened->getSequentialSubReaders();
    Collection<int32_t> vals2 = FieldCache::DEFAULT()->getInts(subReaders[0], L"number");
    reopened->close();

    // every sub-reader must be a read-only instance
    EXPECT_TRUE(boost::dynamic_pointer_cast<ReadOnlySegmentReader>(subReaders[0]));
    EXPECT_TRUE(boost::dynamic_pointer_cast<ReadOnlySegmentReader>(subReaders[1]));
    EXPECT_TRUE(vals.equals(vals2)); // shared segment still reuses the cache entry

    directory->close();
}
1393
/// getUniqueTermCount() works per-segment; a multi-segment reader must throw
/// UnsupportedOperationException while each sub-reader still answers.
TEST_F(IndexReaderTest, testUniqueTermCount) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthUNLIMITED);
    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"field", L"a b c d e f g h i j k l m n o p q r s t u v w x y z", Field::STORE_NO, Field::INDEX_ANALYZED));
    doc->add(newLucene<Field>(L"number", L"0 1 2 3 4 5 6 7 8 9", Field::STORE_NO, Field::INDEX_ANALYZED));
    w->addDocument(doc);
    w->addDocument(doc);
    w->commit();

    IndexReaderPtr reader = IndexReader::open(directory, false);
    IndexReaderPtr segReader = SegmentReader::getOnlySegmentReader(reader);
    EXPECT_EQ(36, segReader->getUniqueTermCount()); // 26 letters + 10 digits
    w->addDocument(doc);
    w->commit();
    IndexReaderPtr reopened = reader->reopen();
    reader->close();
    try {
        reopened->getUniqueTermCount(); // top-level multi reader: unsupported
    } catch (UnsupportedOperationException& e) {
        EXPECT_TRUE(check_exception(LuceneException::UnsupportedOperation)(e));
    }
    Collection<IndexReaderPtr> subReaders = reopened->getSequentialSubReaders();
    for (Collection<IndexReaderPtr>::iterator it = subReaders.begin(); it != subReaders.end(); ++it) {
        EXPECT_EQ(36, (*it)->getUniqueTermCount());
    }
    reopened->close();
    w->close();
    directory->close();
}
1424
1425 /// don't load terms index
/// don't load terms index: opening with termInfosIndexDivisor == -1 must skip
/// the terms index, make term lookups fail, and carry the setting across reopen.
TEST_F(IndexReaderTest, testNoTermsIndex) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthUNLIMITED);
    DocumentPtr doc = newLucene<Document>();
    doc->add(newLucene<Field>(L"field", L"a b c d e f g h i j k l m n o p q r s t u v w x y z", Field::STORE_NO, Field::INDEX_ANALYZED));
    doc->add(newLucene<Field>(L"number", L"0 1 2 3 4 5 6 7 8 9", Field::STORE_NO, Field::INDEX_ANALYZED));
    w->addDocument(doc);
    w->addDocument(doc);
    w->close();

    // divisor -1 disables the terms index entirely
    IndexReaderPtr reader = IndexReader::open(directory, IndexDeletionPolicyPtr(), true, -1);
    try {
        reader->docFreq(newLucene<Term>(L"field", L"f")); // needs the terms index
    } catch (IllegalStateException& e) {
        EXPECT_TRUE(check_exception(LuceneException::IllegalState)(e));
    }
    EXPECT_TRUE(!boost::dynamic_pointer_cast<SegmentReader>(reader->getSequentialSubReaders()[0])->termsIndexLoaded());
    EXPECT_EQ(-1, boost::dynamic_pointer_cast<SegmentReader>(reader->getSequentialSubReaders()[0])->getTermInfosIndexDivisor());
    w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthUNLIMITED);
    w->addDocument(doc);
    w->close();

    // ensure re-open carries over no terms index
    IndexReaderPtr reopened = reader->reopen();
    reader->close();
    Collection<IndexReaderPtr> subs = reopened->getSequentialSubReaders();
    EXPECT_EQ(2, subs.size());
    for (Collection<IndexReaderPtr>::iterator it = subs.begin(); it != subs.end(); ++it) {
        SegmentReaderPtr seg = boost::dynamic_pointer_cast<SegmentReader>(*it);
        EXPECT_TRUE(!seg->termsIndexLoaded());
    }
    reopened->close();
    directory->close();
}
1460
/// prepareCommit() alone must not make an open reader stale; only the
/// subsequent commit() does.
TEST_F(IndexReaderTest, testPrepareCommitIsCurrent) {
    DirectoryPtr directory = newLucene<MockRAMDirectory>();
    IndexWriterPtr w = newLucene<IndexWriter>(directory, newLucene<WhitespaceAnalyzer>(), IndexWriter::MaxFieldLengthUNLIMITED);
    DocumentPtr emptyDoc = newLucene<Document>();
    w->addDocument(emptyDoc);
    IndexReaderPtr reader = IndexReader::open(directory, true);
    EXPECT_TRUE(reader->isCurrent());
    w->addDocument(emptyDoc);
    w->prepareCommit();
    EXPECT_TRUE(reader->isCurrent()); // two-phase commit not finished yet
    IndexReaderPtr reopened = reader->reopen();
    EXPECT_TRUE(reader == reopened); // nothing visible changed: same instance returned
    w->commit();
    EXPECT_TRUE(!reader->isCurrent()); // now the commit is visible
    w->close();
    reader->close();
    directory->close();
}
1479