Results 1 - 10 of 11 for Tokenizer (0.07 sec)
src/main/java/org/codelibs/core/text/Tokenizer.java
Registered: Sat Dec 20 08:55:33 UTC 2025 - Last Modified: Sat Jul 05 00:11:05 UTC 2025 - 8.8K bytes
src/main/java/jcifs/smb1/http/Handler.java
if (handler == null) {
    final String path = System.getProperty(HANDLER_PKGS_PROPERTY);
    final StringTokenizer tokenizer = new StringTokenizer(path, "|");
    while (tokenizer.hasMoreTokens()) {
        final String provider = tokenizer.nextToken().trim();
        if (provider.equals("jcifs.smb1")) {
            continue;
        }
Registered: Sat Dec 20 13:44:44 UTC 2025 - Last Modified: Sat Aug 16 01:32:48 UTC 2025 - 6.1K bytes
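The excerpt above follows the JDK's protocol-handler lookup convention: the property (presumably java.protocol.handler.pkgs) holds a "|"-separated list of package prefixes, and jcifs skips its own entry to avoid resolving itself. A minimal, self-contained sketch of the same parsing pattern; the class name and the literal package list are illustrative stand-ins for the real property value:

import java.util.StringTokenizer;

public class HandlerPkgsDemo {
    public static void main(String[] args) {
        // Stand-in for System.getProperty(HANDLER_PKGS_PROPERTY),
        // which may hold several "|"-separated package prefixes.
        final String path = "jcifs.smb1|com.example.protocols";

        final StringTokenizer tokenizer = new StringTokenizer(path, "|");
        while (tokenizer.hasMoreTokens()) {
            final String provider = tokenizer.nextToken().trim();
            if (provider.equals("jcifs.smb1")) {
                continue; // skip our own package, as Handler.java does
            }
            System.out.println("candidate handler package: " + provider);
        }
    }
}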
src/test/java/org/codelibs/core/text/TokenizerTest.java
public void testEOF() throws Exception {
    final Tokenizer tokenizer = new Tokenizer("");
    assertThat(tokenizer.nextToken(), is(Tokenizer.TT_EOF));
    assertThat(tokenizer.nextToken(), is(Tokenizer.TT_EOF));
}

/**
 * @throws Exception
 */
@Test
public void testWhitespace() throws Exception {
    final Tokenizer tokenizer = new Tokenizer("\t \n");
Registered: Sat Dec 20 08:55:33 UTC 2025 - Last Modified: Sat May 10 01:32:17 UTC 2025 - 2K bytes
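The test above pins down the core contract of org.codelibs.core.text.Tokenizer: construct it over a string and pull tokens until TT_EOF, which is then returned on every subsequent call. A minimal sketch of that loop, using only the constructor, nextToken(), and TT_EOF shown in the test; the sample input and the printed output are illustrative:

import org.codelibs.core.text.Tokenizer;

public class TokenizerDemo {
    public static void main(String[] args) {
        final Tokenizer tokenizer = new Tokenizer("foo bar baz");
        int tokenType;
        // nextToken() keeps returning TT_EOF once the input is exhausted,
        // exactly as testEOF() asserts above.
        while ((tokenType = tokenizer.nextToken()) != Tokenizer.TT_EOF) {
            System.out.println("token type: " + tokenType);
        }
    }
}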
src/test/java/org/codelibs/opensearch/extension/kuromoji/index/analysis/KuromojiTokenizerFactory.java
import java.io.IOException;
import java.io.StringReader;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
import org.apache.lucene.analysis.ja.dict.UserDictionary;
import org.apache.lucene.analysis.util.CSVUtil;
Registered: Sat Dec 20 13:04:59 UTC 2025 - Last Modified: Sat Mar 15 06:51:20 UTC 2025 - 4.7K bytes
src/main/java/org/codelibs/fess/suggest/converter/KatakanaConverter.java
 * use a tokenizer to process the input.
 *
 * <p>
 * The class provides methods to initialize the converter, convert strings, and
 * check if a tokenizer is enabled. It also includes methods to create a token
 * stream and extract reading information from the stream's attributes, although
 * the tokenizer-related functionality is currently commented out.
 * </p>
 */
Registered: Sat Dec 20 13:04:59 UTC 2025 - Last Modified: Fri Jul 04 14:00:23 UTC 2025 - 6.1K bytes
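The Javadoc above describes (currently commented-out) logic that creates a token stream and reads the reading information off its attributes. A minimal sketch of how that idea looks with Lucene's Kuromoji tokenizer, assuming lucene-analysis-kuromoji is on the classpath; the sample text, the null user dictionary, and NORMAL mode are assumptions, not KatakanaConverter's actual configuration:

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.tokenattributes.ReadingAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ReadingDemo {
    public static void main(String[] args) throws IOException {
        // null user dictionary, discard punctuation, NORMAL segmentation
        try (Tokenizer tokenizer = new JapaneseTokenizer(null, true, JapaneseTokenizer.Mode.NORMAL)) {
            tokenizer.setReader(new StringReader("寿司"));
            final CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            final ReadingAttribute reading = tokenizer.addAttribute(ReadingAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                // getReading() returns the katakana reading, e.g. スシ for 寿司
                System.out.println(term + " -> " + reading.getReading());
            }
            tokenizer.end();
        }
    }
}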
src/main/resources/fess_indices/fess_config.data_config.json
"number_of_shards": 1, "number_of_replicas": 0, "auto_expand_replicas": "0-1" }, "analysis": { "analyzer": { "standard_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "cjk_width", "asciifolding", "lowercase", "stop", "stemmer" ] } } } }
Registered: Sat Dec 20 09:19:18 UTC 2025 - Last Modified: Thu Dec 02 13:14:56 UTC 2021 - 484 bytes
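The settings above assemble a custom analyzer from the standard tokenizer plus a filter chain. A rough Lucene-side equivalent can be sketched with CustomAnalyzer for local experimentation; the filter names below are Lucene's analysis-common SPI names, and "porterStem" is a stand-in assumption for OpenSearch's language-configurable "stemmer" filter:

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.custom.CustomAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class StandardAnalyzerChainDemo {
    public static void main(String[] args) throws IOException {
        // Same shape as "standard_analyzer" above: standard tokenizer,
        // then width folding, ASCII folding, lowercasing, stopwords, stemming.
        try (Analyzer analyzer = CustomAnalyzer.builder()
                .withTokenizer("standard")
                .addTokenFilter("cjkWidth")
                .addTokenFilter("asciiFolding")
                .addTokenFilter("lowercase")
                .addTokenFilter("stop")
                .addTokenFilter("porterStem") // stand-in for "stemmer"
                .build();
             TokenStream stream = analyzer.tokenStream("content", "The Tokenizers")) {
            final CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term.toString()); // stopword "The" is dropped
            }
            stream.end();
        }
    }
}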
src/main/java/jcifs/http/Handler.java
if (handler == null) {
    final String path = System.getProperty(HANDLER_PKGS_PROPERTY);
    final StringTokenizer tokenizer = new StringTokenizer(path, "|");
    while (tokenizer.hasMoreTokens()) {
        final String provider = tokenizer.nextToken().trim();
        if (provider.equals("jcifs")) {
            continue;
        }
Registered: Sat Dec 20 13:44:44 UTC 2025 - Last Modified: Sat Aug 16 01:32:48 UTC 2025 - 6.9K bytes
src/test/java/org/codelibs/opensearch/extension/analysis/SynonymLoader.java
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
    final Tokenizer tokenizer = new KeywordTokenizer();
    final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
    return new TokenStreamComponents(tokenizer, stream);
}
};
}
Registered: Sat Dec 20 13:04:59 UTC 2025 - Last Modified: Sun May 18 02:59:16 UTC 2025 - 6.7K bytes
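The factory above builds a throwaway Analyzer whose chain is just a KeywordTokenizer, so the whole input becomes a single token, optionally lower-cased, which is what synonym-rule parsing needs. A small sketch of the same chain outside the anonymous class; the sample input and the demo class are assumptions:

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class KeywordChainDemo {
    public static void main(String[] args) throws IOException {
        final boolean ignoreCase = true;
        final Tokenizer tokenizer = new KeywordTokenizer();
        tokenizer.setReader(new StringReader("Foo Bar"));
        // Wrap in LowerCaseFilter only when case should be ignored,
        // mirroring the ternary in SynonymLoader above.
        final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
        final CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString()); // prints "foo bar" as one token
        }
        stream.end();
        stream.close();
    }
}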
src/test/java/org/codelibs/opensearch/extension/analysis/NGramSynonymTokenizerFactory.java
 * either express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 */
package org.codelibs.opensearch.extension.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.analysis.AbstractTokenizerFactory;

/**
Registered: Sat Dec 20 13:04:59 UTC 2025 - Last Modified: Sat Mar 15 06:51:20 UTC 2025 - 2.4K bytes
src/main/resources/fess_indices/fess_config.file_config.json
"number_of_shards": 1, "number_of_replicas": 0, "auto_expand_replicas": "0-1" }, "analysis": { "analyzer": { "standard_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "cjk_width", "asciifolding", "lowercase", "stop", "stemmer" ] } } } }
Registered: Sat Dec 20 09:19:18 UTC 2025 - Last Modified: Thu Dec 02 13:14:56 UTC 2021 - 484 bytes