diff --git a/benchmark/src/main/scala/benchmark/IMDBBenchmark.scala b/benchmark/src/main/scala/benchmark/IMDBBenchmark.scala
index ee1b4040..df48fb0b 100644
--- a/benchmark/src/main/scala/benchmark/IMDBBenchmark.scala
+++ b/benchmark/src/main/scala/benchmark/IMDBBenchmark.scala
@@ -78,11 +78,11 @@ object IMDBBenchmark { // extends IOApp {
     val io = for {
       _ <- implementation.init()
       _ = scribe.info("--- Stage 1 ---")
-      akasFile <- downloadFile(new File(baseDirectory, "title.akas.tsv"), Limit.OneMillion).elapsed
+      akasFile <- downloadFile(new File(baseDirectory, "title.akas.tsv"), Limit.OneHundredThousand).elapsed
       _ = scribe.info("--- Stage 2 ---")
       totalAka <- process(akasFile.value, implementation.map2TitleAka, implementation.persistTitleAka).elapsed
       _ = scribe.info("--- Stage 3 ---")
-      basicsFile <- downloadFile(new File(baseDirectory, "title.basics.tsv"), Limit.OneMillion).elapsed
+      basicsFile <- downloadFile(new File(baseDirectory, "title.basics.tsv"), Limit.OneHundredThousand).elapsed
       _ = scribe.info("--- Stage 4 ---")
       totalBasics <- process(basicsFile.value, implementation.map2TitleBasics, implementation.persistTitleBasics).elapsed
       _ = scribe.info("--- Stage 5 ---")
diff --git a/benchmark/src/main/scala/benchmark/LightDBImplementation.scala b/benchmark/src/main/scala/benchmark/LightDBImplementation.scala
index aa8881b1..adf2407e 100644
--- a/benchmark/src/main/scala/benchmark/LightDBImplementation.scala
+++ b/benchmark/src/main/scala/benchmark/LightDBImplementation.scala
@@ -21,7 +21,7 @@ object LightDBImplementation extends BenchmarkImplementation {
   override def map2TitleAka(map: Map[String, String]): TitleAkaLDB = TitleAkaLDB(
     titleId = map.value("titleId"),
     ordering = map.int("ordering"),
-    title = map.value("title"),
+    title = map.value("title").replace("\\N", "N"),
     region = map.option("region"),
     language = map.option("language"),
     types = map.list("types"),
@@ -57,7 +57,10 @@ object LightDBImplementation extends BenchmarkImplementation {
   override def findByTitleId(titleId: String): IO[List[TitleAkaLDB]] = db.titleAka.query.filter(TitleAkaLDB.titleId === titleId).search().compile.toList.flatMap(_.map(_.get()).sequence)
 
-  override def flush(): IO[Unit] = db.titleAka.commit()
+  override def flush(): IO[Unit] = for {
+    _ <- db.titleAka.commit()
+    _ <- db.titleBasics.commit()
+  } yield ()
 
   override def verifyTitleAka(): IO[Unit] = for {
     haloCount <- db.titleAka.store.count()
@@ -86,9 +89,9 @@ object LightDBImplementation extends BenchmarkImplementation {
   object TitleAkaLDB extends JsonMapping[TitleAkaLDB] {
     override implicit val rw: RW[TitleAkaLDB] = RW.gen
 
-    val titleId: FD[String] = field("titleId", _.titleId).indexed()
-    val ordering: FD[Int] = field("ordering", _.ordering).indexed()
-    val title: FD[String] = field("title", _.title).indexed()
+    val titleId: FD[String] = field("titleId", _.titleId)
+    val ordering: FD[Int] = field("ordering", _.ordering)
+    val title: FD[String] = field("title", _.title)
   }
 
   case class TitleBasicsLDB(tconst: String, titleType: String, primaryTitle: String, originalTitle: String, isAdult: Boolean, startYear: Int, endYear: Int, runtimeMinutes: Int, genres: List[String], _id: Id[TitleBasics]) extends Document[TitleBasics]
@@ -96,8 +99,8 @@ object LightDBImplementation extends BenchmarkImplementation {
   object TitleBasicsLDB extends JsonMapping[TitleBasicsLDB] {
    override implicit val rw: RW[TitleBasicsLDB] = RW.gen
 
-    val tconst: FD[String] = field("tconst", _.tconst).indexed()
-    val primaryTitle: FD[String] = field("primaryTitle", _.primaryTitle).indexed()
-    val originalTitle: FD[String] = field("originalTitle", _.originalTitle).indexed()
+    val tconst: FD[String] = field("tconst", _.tconst)
+    val primaryTitle: FD[String] = field("primaryTitle", _.primaryTitle)
+    val originalTitle: FD[String] = field("originalTitle", _.originalTitle)
  }
 }
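Context for the `.replace("\\N", "N")` change above: IMDb's TSV dumps use the literal two-character sequence \N to mark missing values, and the marker was previously stored verbatim in `title`. Rewriting it to "N" presumably keeps the marker from leaking into the index, but it discards the null information. A minimal sketch of the alternative, treating the marker as absence; the ImdbNulls helper is hypothetical, not part of this change:

object ImdbNulls {
  // IMDb TSVs use the two characters \N (written "\\N" in Scala source) for null.
  private val NullMarker = "\\N"

  def optional(row: Map[String, String], key: String): Option[String] =
    row.get(key).filterNot(_ == NullMarker)
}

// Usage sketch:
//   ImdbNulls.optional(Map("region" -> "\\N"), "region")      // None
//   ImdbNulls.optional(Map("title" -> "Carmencita"), "title") // Some("Carmencita")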
diff --git a/core/shared/src/main/scala/lightdb/data/JsonDataManager.scala b/core/shared/src/main/scala/lightdb/data/JsonDataManager.scala
index 1c3d5ddd..659ec8b1 100644
--- a/core/shared/src/main/scala/lightdb/data/JsonDataManager.scala
+++ b/core/shared/src/main/scala/lightdb/data/JsonDataManager.scala
@@ -6,7 +6,11 @@ import fabric.rw._
 class JsonDataManager[T: RW] extends DataManager[T] {
   override def fromArray(array: Array[Byte]): T = {
     val jsonString = new String(array, "UTF-8")
-    JsonParser(jsonString).as[T]
+    try {
+      JsonParser(jsonString).as[T]
+    } catch {
+      case t: Throwable => throw new RuntimeException(s"Unable to parse: [$jsonString]", t)
+    }
   }
 
   override def toArray(value: T): Array[Byte] = {
diff --git a/lucene/src/main/scala/lightdb/index/lucene/IndexFeature.scala b/lucene/src/main/scala/lightdb/index/lucene/IndexFeature.scala
index 79143e85..d0c96992 100644
--- a/lucene/src/main/scala/lightdb/index/lucene/IndexFeature.scala
+++ b/lucene/src/main/scala/lightdb/index/lucene/IndexFeature.scala
@@ -1,19 +1,19 @@
-package lightdb.index.lucene
-
-import com.outr.lucene4s.Lucene
-import com.outr.lucene4s.field.{Field => LuceneField}
-import com.outr.lucene4s.field.FieldType
-import com.outr.lucene4s.field.value.support.ValueSupport
-import lightdb.field.FieldFeature
-
-case class IndexFeature[F](fieldType: FieldType,
-                           fullTextSearchable: Boolean,
-                           sortable: Boolean,
-                           valueSupport: ValueSupport[F]) extends FieldFeature {
-  def createField(name: String, lucene: Lucene): LuceneField[F] = lucene.create.field[F](
-    name = name,
-    fieldType = fieldType,
-    fullTextSearchable = fullTextSearchable,
-    sortable = sortable
-  )(valueSupport)
-}
\ No newline at end of file
+//package lightdb.index.lucene
+//
+//import com.outr.lucene4s.Lucene
+//import com.outr.lucene4s.field.{Field => LuceneField}
+//import com.outr.lucene4s.field.FieldType
+//import com.outr.lucene4s.field.value.support.ValueSupport
+//import lightdb.field.FieldFeature
+//
+//case class IndexFeature[F](fieldType: FieldType,
+//                           fullTextSearchable: Boolean,
+//                           sortable: Boolean,
+//                           valueSupport: ValueSupport[F]) extends FieldFeature {
+//  def createField(name: String, lucene: Lucene): LuceneField[F] = lucene.create.field[F](
+//    name = name,
+//    fieldType = fieldType,
+//    fullTextSearchable = fullTextSearchable,
+//    sortable = sortable
+//  )(valueSupport)
+//}
\ No newline at end of file
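The JsonDataManager change above re-throws parse failures with the offending payload attached, which makes corrupt records identifiable from logs. The same pattern in isolation, as a sketch (parseWithContext is illustrative, not an API in this codebase); note that scala.util.Try confines the catch to non-fatal errors, whereas `case t: Throwable` also traps fatal ones:

import scala.util.{Failure, Success, Try}

// Re-throw with the raw input attached; the original cause is preserved
// so the stack trace still points at the real failure.
def parseWithContext[T](raw: String)(parse: String => T): T =
  Try(parse(raw)) match {
    case Success(value) => value
    case Failure(t)     => throw new RuntimeException(s"Unable to parse: [$raw]", t)
  }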
diff --git a/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexer.scala b/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexer.scala
index 289dc3f1..32d8c3bc 100644
--- a/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexer.scala
+++ b/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexer.scala
@@ -1,136 +1,136 @@
-package lightdb.index.lucene
-
-import cats.effect.IO
-import com.outr.lucene4s._
-import com.outr.lucene4s.field.value.FieldAndValue
-import com.outr.lucene4s.field.{FieldType, Field => LuceneField}
-import com.outr.lucene4s.query.{MatchAllSearchTerm, SearchTerm, Sort => LuceneSort, SearchResult => L4SSearchResult}
-import lightdb.collection.Collection
-import lightdb.field.Field
-import lightdb.index.{Indexer, SearchResult}
-import lightdb.query.{Filter, Query, Sort}
-import lightdb.{Document, Id}
-import fs2.Stream
-
-case class LuceneIndexer[D <: Document[D]](collection: Collection[D], autoCommit: Boolean = false) extends Indexer[D] { li =>
-  private val lucene = new DirectLucene(
-    uniqueFields = List("_id"),
-    directory = collection.db.directory.map(_.resolve(collection.collectionName)),
-    defaultFullTextSearchable = true,
-    autoCommit = autoCommit
-  )
-  private var _fields: List[IndexedField[Any]] = Nil
-  private var fieldsMap: Map[String, IndexedField[Any]] = Map.empty
-
-  private[lucene] def fields: List[IndexedField[Any]] = _fields
-
-  val _id: IndexedField[Id[D]] = {
-    collection.mapping.field.get[Id[D]]("_id").getOrElse(throw new RuntimeException("_id field not specified")).indexed(fieldType = FieldType.Untokenized)
-    field[Id[D]]("_id")
-  }
-
-  object field {
-    def get[F](name: String): Option[IndexedField[F]] = fieldsMap
-      .get(name)
-      .map(_.asInstanceOf[IndexedField[F]])
-      .orElse {
-        collection.mapping.field.get[F](name).flatMap { f =>
-          f
-            .features
-            .find(_.isInstanceOf[IndexFeature[_]])
-            .map(_.asInstanceOf[IndexFeature[F]])
-            .map { indexFeature =>
-              val indexedField = IndexedField(indexFeature.createField(name, lucene), f)
-              li.synchronized {
-                val aif = indexedField.asInstanceOf[IndexedField[Any]]
-                _fields = _fields ::: List(aif)
-                fieldsMap += name -> aif
-              }
-              indexedField
-            }
-        }
-      }
-    def apply[F](name: String): IndexedField[F] = get[F](name).getOrElse(throw new RuntimeException(s"Field not defined: $name"))
-  }
-
-  override def put(value: D): IO[D] = IO {
-    val fields = collection.mapping.fields.flatMap(f => field.get[Any](f.name))
-    if (fields.nonEmpty && fields.tail.nonEmpty) { // No need to index if _id is the only field
-      val fieldsAndValues = fields.map(_.fieldAndValue(value))
-      lucene
-        .doc()
-        .update(exact(_id.luceneField(value._id)))
-        .fields(fieldsAndValues: _*)
-        .index()
-    }
-    value
-  }
-
-  override def delete(id: Id[D]): IO[Unit] = IO(lucene.delete(exact(_id.luceneField(id))))
-
-  override def commit(): IO[Unit] = IO {
-    lucene.commit()
-  }
-
-  override def count(): IO[Long] = IO {
-    lucene.count()
-  }
-
-  private[lucene] def indexed[F](luceneField: LuceneField[F], field: Field[D, F]): Unit = {
-    IndexedField[F](luceneField, field)
-  }
-
-  private def filter2Lucene(filter: Filter[D]): SearchTerm = {
-    def fieldAndValue(field: Field[D, Any], value: Any): FieldAndValue[Any] = this.field[Any](field.name).luceneField(value)
-    filter match {
-      case Filter.Equals(field, value) =>
-        val fv = fieldAndValue(field.asInstanceOf[Field[D, Any]], value)
-        exact(fv)
-      case Filter.NotEquals(field, value) =>
-        val fv = fieldAndValue(field.asInstanceOf[Field[D, Any]], value)
-        none(exact(fv))
-      case Filter.Includes(field, values) =>
-        val terms = values.map(v => filter2Lucene(Filter.Equals(field.asInstanceOf[Field[D, Any]], v)))
-        any(terms: _*)
-      case Filter.Excludes(field, values) =>
-        val terms = values.map(v => filter2Lucene(Filter.Equals(field.asInstanceOf[Field[D, Any]], v)))
-        none(terms: _*)
-    }
-  }
-
-  override def search(query: Query[D]): Stream[IO, SearchResult[D]] = {
-    var q = lucene.query().offset(query.offset).limit(query.batchSize)
-    q = query.filters.foldLeft(q)((qb, f) => q.filter(filter2Lucene(f)))
-    q = q.sort(query.sort.map {
-      case Sort.BestMatch => LuceneSort.Score
-      case Sort.IndexOrder => LuceneSort.IndexOrder
-      case Sort.ByField(field, reverse) => LuceneSort(this.field[Any](field.name).luceneField, reverse)
-    }: _*)
-
-    val pagedResults = q.search()
-    val pagedResultsIterator = pagedResults.pagedResultsIterator
-    Stream.fromBlockingIterator[IO](pagedResultsIterator, query.batchSize)
-      .map(result => LuceneSearchResult(query, pagedResults.total, result))
-  }
-
-  override def truncate(): IO[Unit] = IO(lucene.delete(MatchAllSearchTerm))
-
-  override def dispose(): IO[Unit] = IO(lucene.dispose())
-
-  case class IndexedField[F](luceneField: LuceneField[F], field: Field[D, F]) {
-    def fieldAndValue(value: D): FieldAndValue[F] = luceneField(field.getter(value))
-  }
-
-  case class LuceneSearchResult(query: Query[D],
-                                total: Long,
-                                result: L4SSearchResult) extends SearchResult[D] {
-    override lazy val id: Id[D] = result(_id.luceneField)
-
-    override def get(): IO[D] = collection(id)
-
-    override def apply[F](field: Field[D, F]): F = {
-      val indexedField = fields.find(_.field.name == field.name).getOrElse(throw new RuntimeException(s"Unable to find indexed field for: ${field.name}"))
-      result(indexedField.luceneField).asInstanceOf[F]
-    }
-  }
-}
\ No newline at end of file
+//package lightdb.index.lucene
+//
+//import cats.effect.IO
+//import com.outr.lucene4s._
+//import com.outr.lucene4s.field.value.FieldAndValue
+//import com.outr.lucene4s.field.{FieldType, Field => LuceneField}
+//import com.outr.lucene4s.query.{MatchAllSearchTerm, SearchTerm, Sort => LuceneSort, SearchResult => L4SSearchResult}
+//import lightdb.collection.Collection
+//import lightdb.field.Field
+//import lightdb.index.{Indexer, SearchResult}
+//import lightdb.query.{Filter, Query, Sort}
+//import lightdb.{Document, Id}
+//import fs2.Stream
+//
+//case class LuceneIndexer[D <: Document[D]](collection: Collection[D], autoCommit: Boolean = false) extends Indexer[D] { li =>
+//  private val lucene = new DirectLucene(
+//    uniqueFields = List("_id"),
+//    directory = collection.db.directory.map(_.resolve(collection.collectionName)),
+//    defaultFullTextSearchable = true,
+//    autoCommit = autoCommit
+//  )
+//  private var _fields: List[IndexedField[Any]] = Nil
+//  private var fieldsMap: Map[String, IndexedField[Any]] = Map.empty
+//
+//  private[lucene] def fields: List[IndexedField[Any]] = _fields
+//
+//  val _id: IndexedField[Id[D]] = {
+//    collection.mapping.field.get[Id[D]]("_id").getOrElse(throw new RuntimeException("_id field not specified")).indexed(fieldType = FieldType.Untokenized)
+//    field[Id[D]]("_id")
+//  }
+//
+//  object field {
+//    def get[F](name: String): Option[IndexedField[F]] = fieldsMap
+//      .get(name)
+//      .map(_.asInstanceOf[IndexedField[F]])
+//      .orElse {
+//        collection.mapping.field.get[F](name).flatMap { f =>
+//          f
+//            .features
+//            .find(_.isInstanceOf[IndexFeature[_]])
+//            .map(_.asInstanceOf[IndexFeature[F]])
+//            .map { indexFeature =>
+//              val indexedField = IndexedField(indexFeature.createField(name, lucene), f)
+//              li.synchronized {
+//                val aif = indexedField.asInstanceOf[IndexedField[Any]]
+//                _fields = _fields ::: List(aif)
+//                fieldsMap += name -> aif
+//              }
+//              indexedField
+//            }
+//        }
+//      }
+//    def apply[F](name: String): IndexedField[F] = get[F](name).getOrElse(throw new RuntimeException(s"Field not defined: $name"))
+//  }
+//
+//  override def put(value: D): IO[D] = IO {
+//    val fields = collection.mapping.fields.flatMap(f => field.get[Any](f.name))
+//    if (fields.nonEmpty && fields.tail.nonEmpty) { // No need to index if _id is the only field
+//      val fieldsAndValues = fields.map(_.fieldAndValue(value))
+//      lucene
+//        .doc()
+//        .update(exact(_id.luceneField(value._id)))
+//        .fields(fieldsAndValues: _*)
+//        .index()
+//    }
+//    value
+//  }
+//
+//  override def delete(id: Id[D]): IO[Unit] = IO(lucene.delete(exact(_id.luceneField(id))))
+//
+//  override def commit(): IO[Unit] = IO {
+//    lucene.commit()
+//  }
+//
+//  override def count(): IO[Long] = IO {
+//    lucene.count()
+//  }
+//
+//  private[lucene] def indexed[F](luceneField: LuceneField[F], field: Field[D, F]): Unit = {
+//    IndexedField[F](luceneField, field)
+//  }
+//
+//  private def filter2Lucene(filter: Filter[D]): SearchTerm = {
+//    def fieldAndValue(field: Field[D, Any], value: Any): FieldAndValue[Any] = this.field[Any](field.name).luceneField(value)
+//    filter match {
+//      case Filter.Equals(field, value) =>
+//        val fv = fieldAndValue(field.asInstanceOf[Field[D, Any]], value)
+//        exact(fv)
+//      case Filter.NotEquals(field, value) =>
+//        val fv = fieldAndValue(field.asInstanceOf[Field[D, Any]], value)
+//        none(exact(fv))
+//      case Filter.Includes(field, values) =>
+//        val terms = values.map(v => filter2Lucene(Filter.Equals(field.asInstanceOf[Field[D, Any]], v)))
+//        any(terms: _*)
+//      case Filter.Excludes(field, values) =>
+//        val terms = values.map(v => filter2Lucene(Filter.Equals(field.asInstanceOf[Field[D, Any]], v)))
+//        none(terms: _*)
+//    }
+//  }
+//
+//  override def search(query: Query[D]): Stream[IO, SearchResult[D]] = {
+//    var q = lucene.query().offset(query.offset).limit(query.batchSize)
+//    q = query.filters.foldLeft(q)((qb, f) => q.filter(filter2Lucene(f)))
+//    q = q.sort(query.sort.map {
+//      case Sort.BestMatch => LuceneSort.Score
+//      case Sort.IndexOrder => LuceneSort.IndexOrder
+//      case Sort.ByField(field, reverse) => LuceneSort(this.field[Any](field.name).luceneField, reverse)
+//    }: _*)
+//
+//    val pagedResults = q.search()
+//    val pagedResultsIterator = pagedResults.pagedResultsIterator
+//    Stream.fromBlockingIterator[IO](pagedResultsIterator, query.batchSize)
+//      .map(result => LuceneSearchResult(query, pagedResults.total, result))
+//  }
+//
+//  override def truncate(): IO[Unit] = IO(lucene.delete(MatchAllSearchTerm))
+//
+//  override def dispose(): IO[Unit] = IO(lucene.dispose())
+//
+//  case class IndexedField[F](luceneField: LuceneField[F], field: Field[D, F]) {
+//    def fieldAndValue(value: D): FieldAndValue[F] = luceneField(field.getter(value))
+//  }
+//
+//  case class LuceneSearchResult(query: Query[D],
+//                                total: Long,
+//                                result: L4SSearchResult) extends SearchResult[D] {
+//    override lazy val id: Id[D] = result(_id.luceneField)
+//
+//    override def get(): IO[D] = collection(id)
+//
+//    override def apply[F](field: Field[D, F]): F = {
+//      val indexedField = fields.find(_.field.name == field.name).getOrElse(throw new RuntimeException(s"Unable to find indexed field for: ${field.name}"))
+//      result(indexedField.luceneField).asInstanceOf[F]
+//    }
+//  }
+//}
\ No newline at end of file
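The file below replaces the lucene4s-based indexer with direct use of the Lucene API; the block comment at its head is the index-and-search walkthrough from Lucene's own documentation, kept as a reference. A Scala transliteration of that round-trip, assuming a Lucene 9.x classpath (in-memory directory for brevity; the PR itself uses FSDirectory):

import org.apache.lucene.analysis.standard.StandardAnalyzer
import org.apache.lucene.document.{Document, Field, TextField}
import org.apache.lucene.index.{DirectoryReader, IndexWriter, IndexWriterConfig}
import org.apache.lucene.queryparser.classic.QueryParser
import org.apache.lucene.search.IndexSearcher
import org.apache.lucene.store.ByteBuffersDirectory

object LuceneRoundTrip extends App {
  val analyzer  = new StandardAnalyzer
  val directory = new ByteBuffersDirectory // in-memory stand-in for FSDirectory
  val writer    = new IndexWriter(directory, new IndexWriterConfig(analyzer))

  val doc = new Document
  doc.add(new Field("fieldname", "This is the text to be indexed.", TextField.TYPE_STORED))
  writer.addDocument(doc)
  writer.close() // commit + close: makes the document visible to newly opened readers

  val reader   = DirectoryReader.open(directory)
  val searcher = new IndexSearcher(reader)
  val hits     = searcher.search(new QueryParser("fieldname", analyzer).parse("text"), 10).scoreDocs
  assert(hits.length == 1)
  reader.close()
  directory.close()
}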
diff --git a/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexerSupport.scala b/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexerSupport.scala
index ab21a6c9..49b45a24 100644
--- a/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexerSupport.scala
+++ b/lucene/src/main/scala/lightdb/index/lucene/LuceneIndexerSupport.scala
@@ -1,11 +1,122 @@
+/*
+    Analyzer analyzer = new StandardAnalyzer();
+
+    Path indexPath = Files.createTempDirectory("tempIndex");
+    Directory directory = FSDirectory.open(indexPath);
+    IndexWriterConfig config = new IndexWriterConfig(analyzer);
+    IndexWriter iwriter = new IndexWriter(directory, config);
+    Document doc = new Document();
+    String text = "This is the text to be indexed.";
+    doc.add(new Field("fieldname", text, TextField.TYPE_STORED));
+    iwriter.addDocument(doc);
+    iwriter.close();
+
+    // Now search the index:
+    DirectoryReader ireader = DirectoryReader.open(directory);
+    IndexSearcher isearcher = new IndexSearcher(ireader);
+    // Parse a simple query that searches for "text":
+    QueryParser parser = new QueryParser("fieldname", analyzer);
+    Query query = parser.parse("text");
+    ScoreDoc[] hits = isearcher.search(query, 10).scoreDocs;
+    assertEquals(1, hits.length);
+    // Iterate through the results:
+    StoredFields storedFields = isearcher.storedFields();
+    for (int i = 0; i < hits.length; i++) {
+      Document hitDoc = storedFields.document(hits[i].doc);
+      assertEquals("This is the text to be indexed.", hitDoc.get("fieldname"));
+    }
+    ireader.close();
+    directory.close();
+    IOUtils.rm(indexPath);
+ */
+
 package lightdb.index.lucene
 
-import lightdb.Document
+import cats.effect.IO
+import lightdb.{Document, Id, field}
 import lightdb.collection.Collection
-import lightdb.index.Indexer
+import lightdb.index.{Indexer, SearchResult}
+import lightdb.query.{Filter, Query}
+import org.apache.lucene.analysis.standard.StandardAnalyzer
+import org.apache.lucene.index.{DirectoryReader, IndexWriter, IndexWriterConfig}
+import org.apache.lucene.store.FSDirectory
+import org.apache.lucene.document.{Field, IntField, TextField, Document => LuceneDocument}
+import org.apache.lucene.queryparser.classic.QueryParser
+import org.apache.lucene.search.IndexSearcher
 
 trait LuceneIndexerSupport {
   protected def autoCommit: Boolean = false
 
   def indexer[D <: Document[D]](collection: Collection[D]): Indexer[D] = LuceneIndexer(collection, autoCommit)
-}
\ No newline at end of file
+}
+
+case class LuceneIndexer[D <: Document[D]](collection: Collection[D], autoCommit: Boolean = false) extends Indexer[D] {
+  private lazy val analyzer = new StandardAnalyzer
+  private lazy val directory = FSDirectory.open(collection.db.directory.map(_.resolve(collection.collectionName)).get)
+  private lazy val config = new IndexWriterConfig(analyzer)
+  private lazy val indexWriter = new IndexWriter(directory, config)
+
+  private lazy val indexReader = DirectoryReader.open(directory)
+  private lazy val indexSearcher = new IndexSearcher(indexReader)
+
+  override def put(value: D): IO[D] = IO {
+    val document = new LuceneDocument
+    collection.mapping.fields.foreach { field =>
+      field.getter(value) match {
+        case id: Id[_] => document.add(new Field(field.name, id.value, TextField.TYPE_STORED))
+        case s: String => document.add(new Field(field.name, s, TextField.TYPE_STORED))
+        case i: Int => document.add(new IntField(field.name, i, Field.Store.YES))
+        case value => throw new RuntimeException(s"Unsupported value: $value (${value.getClass})")
+      }
+    }
+    if (document.iterator().hasNext) {
+      indexWriter.addDocument(document)
+    }
+    value
+  }
+
+  override def delete(id: Id[D]): IO[Unit] = IO.unit
+
+  override def commit(): IO[Unit] = IO {
+    scribe.info(s"COMMIT! ${collection.collectionName}")
+    indexWriter.flush()
+    indexWriter.close()
+  }
+
+  override def count(): IO[Long] = IO {
+    scribe.info(s"COUNT! ${collection.collectionName}")
+    indexReader.getDocCount("_id")
+  }
+
+  override def search(searchQuery: Query[D]): fs2.Stream[IO, SearchResult[D]] = {
+    val parser = new QueryParser("_id", analyzer)
+    val filters = searchQuery.filters.map {
+      case Filter.Equals(field, value) => s"${field.name}:\"$value\""
+      case f => throw new UnsupportedOperationException(s"Unsupported filter: $f")
+    }
+    val filterString = filters match {
+      case f :: Nil => f
+      case list => list.mkString("(", " AND ", ")")
+    }
+    val q = parser.parse(filterString)
+    val hits = indexSearcher.search(q, searchQuery.batchSize).scoreDocs
+    val storedFields = indexSearcher.storedFields()
+    val results = hits.toList.map { sd =>
+      val document = storedFields.document(sd.doc)
+      val docId = Id[D](document.get("_id"))
+      val d = collection(docId)
+      new SearchResult[D] {
+        // outer names (searchQuery, docId) avoid being shadowed by these members
+        override def query: Query[D] = searchQuery
+        override def total: Long = ???
+        override def id: Id[D] = docId
+        override def get(): IO[D] = d
+        override def apply[F](field: _root_.lightdb.field.Field[D, F]): F = ???
+      }
+    }
+    fs2.Stream[IO, SearchResult[D]](results: _*)
+  }
+
+  override def truncate(): IO[Unit] = ???
+
+  override def dispose(): IO[Unit] = ???
+}
\ No newline at end of file
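One caveat with the LuceneIndexer above: DirectoryReader.open returns a point-in-time snapshot, so the lazily created indexReader will never see documents added after it is first materialized, and commit() closes the IndexWriter, after which further put calls fail. The usual remedy is a long-lived writer plus a SearcherManager that refreshes readers on demand; a sketch under that assumption (not what this PR implements):

import org.apache.lucene.index.IndexWriter
import org.apache.lucene.search.{IndexSearcher, SearcherFactory, SearcherManager}

// Assumes the writer stays open for the life of the indexer and that
// commit() calls writer.commit() rather than writer.close().
final class RefreshingSearcher(writer: IndexWriter) {
  private val manager = new SearcherManager(writer, new SearcherFactory)

  def search[A](f: IndexSearcher => A): A = {
    manager.maybeRefresh() // pick up anything written since the last refresh
    val searcher = manager.acquire()
    try f(searcher)
    finally manager.release(searcher)
  }
}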
diff --git a/lucene/src/main/scala/lightdb/index/lucene/package.scala b/lucene/src/main/scala/lightdb/index/lucene/package.scala
index 3feb3761..43f5e8f1 100644
--- a/lucene/src/main/scala/lightdb/index/lucene/package.scala
+++ b/lucene/src/main/scala/lightdb/index/lucene/package.scala
@@ -1,22 +1,22 @@
-package lightdb.index
-
-import lightdb.{Document, Id}
-import lightdb.field.Field
-
-package object lucene {
-  private object _idSupport extends StringBackedValueSupport[Id[Any]] {
-    override def toString(value: Id[Any]): String = value.value
-
-    override def fromString(s: String): Id[Any] = Id[Any](s)
-  }
-
-  implicit def idSupport[D]: ValueSupport[Id[D]] = _idSupport.asInstanceOf[ValueSupport[Id[D]]]
-
-  implicit class FieldExtras[D <: Document[D], F](field: Field[D, F]) {
-    def indexed(fieldType: FieldType = FieldType.Stored,
-                fullTextSearchable: Boolean = true, // TODO: use default from collection?
-                sortable: Boolean = true)(implicit vs: ValueSupport[F]): Field[D, F] = {
-      field.withFeature(IndexFeature[F](fieldType, fullTextSearchable, sortable, vs))
-    }
-  }
-}
\ No newline at end of file
+//package lightdb.index
+//
+//import lightdb.{Document, Id}
+//import lightdb.field.Field
+//
+//package object lucene {
+//  private object _idSupport extends StringBackedValueSupport[Id[Any]] {
+//    override def toString(value: Id[Any]): String = value.value
+//
+//    override def fromString(s: String): Id[Any] = Id[Any](s)
+//  }
+//
+//  implicit def idSupport[D]: ValueSupport[Id[D]] = _idSupport.asInstanceOf[ValueSupport[Id[D]]]
+//
+//  implicit class FieldExtras[D <: Document[D], F](field: Field[D, F]) {
+//    def indexed(fieldType: FieldType = FieldType.Stored,
+//                fullTextSearchable: Boolean = true, // TODO: use default from collection?
+//                sortable: Boolean = true)(implicit vs: ValueSupport[F]): Field[D, F] = {
+//      field.withFeature(IndexFeature[F](fieldType, fullTextSearchable, sortable, vs))
+//    }
+//  }
+//}
\ No newline at end of file