Results 1 - 7 of 7 for Tokenize (0.02 sec)
src/main/java/org/codelibs/fess/suggest/converter/KatakanaConverter.java
 * use a tokenizer to process the input.
 *
 * <p>
 * The class provides methods to initialize the converter, convert strings, and
 * check if a tokenizer is enabled. It also includes methods to create a token
 * stream and extract reading information from the stream's attributes, although
 * the tokenizer-related functionality is currently commented out.
 * </p>
 */
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Fri Jul 04 14:00:23 UTC 2025 - 6.1K bytes - Viewed (0) -
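The Javadoc above describes extracting reading information from a token stream's attributes. The following is a rough sketch only, not the KatakanaConverter implementation (whose tokenizer code is commented out); it assumes Lucene's kuromoji JapaneseTokenizer and ReadingAttribute as the source of katakana readings.

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.tokenattributes.ReadingAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ReadingExtractionSketch {
    public static void main(final String[] args) throws IOException {
        // Tokenize a Japanese string and print each term with its katakana reading.
        try (Tokenizer tokenizer = new JapaneseTokenizer(null, false, JapaneseTokenizer.Mode.NORMAL)) {
            tokenizer.setReader(new StringReader("検索エンジン"));
            final CharTermAttribute termAttr = tokenizer.addAttribute(CharTermAttribute.class);
            final ReadingAttribute readingAttr = tokenizer.addAttribute(ReadingAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                // getReading() may return null when no reading is known for the term.
                System.out.println(termAttr.toString() + " -> " + readingAttr.getReading());
            }
            tokenizer.end();
        }
    }
}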
src/test/java/org/codelibs/opensearch/extension/analysis/SynonymLoader.java
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
    final Tokenizer tokenizer = new KeywordTokenizer();
    final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
    return new TokenStreamComponents(tokenizer, stream);
}
}; }
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Sun May 18 02:59:16 UTC 2025 - 6.7K bytes - Viewed (1) -
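For orientation only: the excerpt above opens mid-class, so here is a minimal, self-contained framing that assumes it belongs to an anonymous Analyzer returned by a factory method; the ignoreCase flag comes from the surrounding scope in the real file and is hard-coded here.

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;

public class KeywordAnalyzerSketch {

    // Illustrative stand-in for the flag read from the surrounding scope.
    private static final boolean ignoreCase = true;

    public static Analyzer newAnalyzer() {
        return new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(final String fieldName) {
                // Emit the whole input as a single token, optionally lowercased.
                final Tokenizer tokenizer = new KeywordTokenizer();
                final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
                return new TokenStreamComponents(tokenizer, stream);
            }
        };
    }
}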
src/test/java/org/codelibs/opensearch/extension/kuromoji/index/analysis/KuromojiTokenizerFactory.java
import java.io.IOException;
import java.io.StringReader;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
import org.apache.lucene.analysis.ja.dict.UserDictionary;
import org.apache.lucene.analysis.util.CSVUtil;
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Sat Mar 15 06:51:20 UTC 2025 - 4.7K bytes - Viewed (0) -
src/test/java/org/codelibs/opensearch/extension/analysis/NGramSynonymTokenizerFactory.java
 * either express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 */

package org.codelibs.opensearch.extension.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.analysis.AbstractTokenizerFactory;

/**
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Sat Mar 15 06:51:20 UTC 2025 - 2.4K bytes - Viewed (0) -
src/test/java/org/codelibs/fess/suggest/analysis/SuggestAnalyzerTest.java
List<AnalyzeToken> tokens = analyzer.analyzeAndReading(text, field, lang);
assertNotNull(tokens);
assertEquals(2, tokens.size()); // "Test123" and "ABC-456" (hyphen is not a split char in our tokenizer)
assertEquals("Test123", tokens.get(0).getTerm());
assertEquals("ABC-456", tokens.get(1).getTerm());
assertEquals("TEST123", ((TestAnalyzeToken) tokens.get(0)).getReading());
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Mon Sep 01 13:33:03 UTC 2025 - 15.7K bytes - Viewed (0) -
src/test/java/org/codelibs/fess/suggest/converter/AnalyzerConverterTest.java
.put("analysis.analyzer.test_reading_analyzer.tokenizer", "standard") .put("analysis.analyzer.test_reading_term_analyzer.type", "custom") .put("analysis.analyzer.test_reading_term_analyzer.tokenizer", "standard") .put("analysis.analyzer.test_reading_analyzer_ja.type", "custom") .put("analysis.analyzer.test_reading_analyzer_ja.tokenizer", "keyword")
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Mon Sep 01 13:33:03 UTC 2025 - 12.5K bytes - Viewed (0) -
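As a hedged sketch of how the chained settings excerpted above could be assembled with OpenSearch's Settings.builder(): the excerpt begins mid-chain, so the builder call and the first analyzer's "type": "custom" entry are assumptions, not taken from the test file.

import org.opensearch.common.settings.Settings;

public class AnalyzerSettingsSketch {

    public static Settings readingAnalyzerSettings() {
        return Settings.builder()
                // Assumed: the excerpt starts after this line.
                .put("analysis.analyzer.test_reading_analyzer.type", "custom")
                .put("analysis.analyzer.test_reading_analyzer.tokenizer", "standard")
                .put("analysis.analyzer.test_reading_term_analyzer.type", "custom")
                .put("analysis.analyzer.test_reading_term_analyzer.tokenizer", "standard")
                .put("analysis.analyzer.test_reading_analyzer_ja.type", "custom")
                .put("analysis.analyzer.test_reading_analyzer_ja.tokenizer", "keyword")
                .build();
    }
}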
src/test/java/org/codelibs/opensearch/extension/analysis/NGramSynonymTokenizer.java
 */

import java.io.IOException;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Sat Mar 15 06:51:20 UTC 2025 - 17K bytes - Viewed (0)