Results 21 - 30 of 39 for Tokenize (0.13 sec)
- src/main/resources/suggest_indices/suggest_analyzer.json
"tokenizer" : "standard" }, "reading_term_analyzer_mk" : { "type" : "custom", "tokenizer" : "standard" }, "normalize_analyzer_mk" : { "type" : "custom", "tokenizer" : "keyword", "char_filter" : ["mapping_char"], "filter" : ["lowercase"] }, "contents_analyzer_mk" : { "type" : "custom", "tokenizer" : "standard",
Registered: Thu Oct 31 13:40:30 UTC 2024 - Last Modified: Thu May 23 05:09:51 UTC 2019 - 57.7K bytes - Viewed (0)
- src/test/java/org/codelibs/core/text/TokenizerTest.java
public void testEOF() throws Exception {
    final Tokenizer tokenizer = new Tokenizer("");
    assertThat(tokenizer.nextToken(), is(Tokenizer.TT_EOF));
    assertThat(tokenizer.nextToken(), is(Tokenizer.TT_EOF));
}

/**
 * @throws Exception
 */
@Test
public void testWhitespace() throws Exception {
    final Tokenizer tokenizer = new Tokenizer("\t \n");
Registered: Fri Nov 01 20:58:10 UTC 2024 - Last Modified: Thu Mar 07 01:59:08 UTC 2024 - 2K bytes - Viewed (0)
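For context, a minimal sketch of driving the class under test, using only the calls visible in the excerpt above (the String constructor, nextToken(), and the TT_EOF constant); the token count in the comment is an assumption about whitespace-separated word tokens, not documented behavior.

import org.codelibs.core.text.Tokenizer;

public class TokenizerSketch {
    public static void main(String[] args) {
        // Read tokens until the tokenizer reports end-of-input, mirroring
        // the TT_EOF checks in the test excerpt above.
        final Tokenizer tokenizer = new Tokenizer("foo bar baz");
        int count = 0;
        while (tokenizer.nextToken() != Tokenizer.TT_EOF) {
            count++;
        }
        System.out.println(count + " tokens"); // presumably 3 word tokens for this input
    }
}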
- src/main/java/jcifs/http/Handler.java
}
if ( handler == null ) {
    String path = System.getProperty(HANDLER_PKGS_PROPERTY);
    StringTokenizer tokenizer = new StringTokenizer(path, "|");
    while ( tokenizer.hasMoreTokens() ) {
        String provider = tokenizer.nextToken().trim();
        if ( provider.equals("jcifs") )
            continue;
Registered: Sun Nov 03 00:10:13 UTC 2024 - Last Modified: Sun Jul 01 13:12:10 UTC 2018 - 6.9K bytes - Viewed (0)
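Both jcifs Handler excerpts (this one and the jcifs.smb1 variant below) split a "|"-delimited handler package list read from a system property. A standalone sketch of that splitting step; the property value here is made up for illustration and HANDLER_PKGS_PROPERTY is replaced by a literal string.

import java.util.StringTokenizer;

public class HandlerPkgsSketch {
    public static void main(String[] args) {
        // Hypothetical pipe-delimited handler package list, standing in for the
        // value read via System.getProperty(HANDLER_PKGS_PROPERTY) in the excerpt.
        String path = "jcifs|sun.net.www.protocol|com.example.handlers";
        StringTokenizer tokenizer = new StringTokenizer(path, "|");
        while (tokenizer.hasMoreTokens()) {
            String provider = tokenizer.nextToken().trim();
            if (provider.equals("jcifs")) {
                continue; // the excerpt skips its own package the same way
            }
            System.out.println("candidate handler package: " + provider);
        }
    }
}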
- src/main/java/jcifs/smb1/http/Handler.java
}
if (handler == null) {
    String path = System.getProperty(HANDLER_PKGS_PROPERTY);
    StringTokenizer tokenizer = new StringTokenizer(path, "|");
    while (tokenizer.hasMoreTokens()) {
        String provider = tokenizer.nextToken().trim();
        if (provider.equals("jcifs.smb1"))
            continue;
Registered: Sun Nov 03 00:10:13 UTC 2024 - Last Modified: Fri Mar 22 20:39:42 UTC 2019 - 6.1K bytes - Viewed (0)
- guava/src/com/google/common/net/MediaType.java
checkNotNull(input);
Tokenizer tokenizer = new Tokenizer(input);
try {
    String type = tokenizer.consumeToken(TOKEN_MATCHER);
    consumeSeparator(tokenizer, '/');
    String subtype = tokenizer.consumeToken(TOKEN_MATCHER);
    ImmutableListMultimap.Builder<String, String> parameters = ImmutableListMultimap.builder();
    while (tokenizer.hasMore()) {
        consumeSeparator(tokenizer, ';');
Registered: Fri Nov 01 12:43:10 UTC 2024 - Last Modified: Thu Sep 26 19:15:09 UTC 2024 - 47.5K bytes - Viewed (0)
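The MediaType excerpt shows the internal Tokenizer that backs parsing; from the caller's side the same loop is reached through MediaType.parse. A small usage sketch (the commented outputs assume standard parsing of this literal):

import com.google.common.net.MediaType;

public class MediaTypeSketch {
    public static void main(String[] args) {
        // Parsing walks the type, the '/' separator, the subtype, and then any
        // ';'-separated parameters, as in the tokenizer loop in the excerpt above.
        MediaType mediaType = MediaType.parse("text/plain; charset=utf-8");
        System.out.println(mediaType.type());       // text
        System.out.println(mediaType.subtype());    // plain
        System.out.println(mediaType.parameters()); // {charset=[utf-8]}
        System.out.println(mediaType.charset());    // Optional.of(UTF-8)
    }
}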
- okhttp/src/main/kotlin/okhttp3/internal/http/HttpHeaders.kt
 * [TOKEN_DELIMITERS]. Returns null if the buffer is empty or prefixed with a delimiter. */
private fun Buffer.readToken(): String? {
  var tokenSize = indexOfElement(TOKEN_DELIMITERS)
  if (tokenSize == -1L) tokenSize = size
  return when {
    tokenSize != 0L -> readUtf8(tokenSize)
    else -> null
  }
}

fun CookieJar.receiveHeaders(
  url: HttpUrl,
  headers: Headers,
) {
Registered: Fri Nov 01 11:42:11 UTC 2024 - Last Modified: Mon Jan 08 01:13:22 UTC 2024 - 7.2K bytes - Viewed (0)
- src/test/java/org/codelibs/opensearch/extension/kuromoji/index/analysis/KuromojiTokenizerFactory.java
import java.io.IOException;
import java.io.StringReader;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer;
import org.apache.lucene.analysis.ja.JapaneseTokenizer.Mode;
import org.apache.lucene.analysis.ja.dict.UserDictionary;
import org.apache.lucene.analysis.util.CSVUtil;
Registered: Fri Nov 08 09:08:12 UTC 2024 - Last Modified: Thu Feb 22 01:36:54 UTC 2024 - 4.7K bytes - Viewed (0)
- src/test/java/org/codelibs/opensearch/extension/analysis/SynonymLoader.java
        @Override
        protected TokenStreamComponents createComponents(final String fieldName) {
            final Tokenizer tokenizer = new KeywordTokenizer();
            final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
            return new TokenStreamComponents(tokenizer, stream);
        }
    };
}
Registered: Fri Nov 08 09:08:12 UTC 2024 - Last Modified: Thu Feb 22 01:36:54 UTC 2024 - 6.7K bytes - Viewed (0)
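The SynonymLoader excerpt builds an anonymous Analyzer around KeywordTokenizer, optionally lower-casing the single token. A self-contained sketch of that pattern; the import locations assume a recent Lucene (older releases keep LowerCaseFilter under analysis.core), so adjust to the version actually on the classpath.

import java.io.IOException;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class KeywordAnalyzerSketch {
    public static void main(String[] args) throws IOException {
        final boolean ignoreCase = true;
        Analyzer analyzer = new Analyzer() {
            @Override
            protected TokenStreamComponents createComponents(final String fieldName) {
                // Same shape as the excerpt: the whole input becomes one token,
                // optionally wrapped in a lower-case filter.
                final Tokenizer tokenizer = new KeywordTokenizer();
                final TokenStream stream = ignoreCase ? new LowerCaseFilter(tokenizer) : tokenizer;
                return new TokenStreamComponents(tokenizer, stream);
            }
        };
        try (TokenStream stream = analyzer.tokenStream("f", "Hello World")) {
            CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
            stream.reset();
            while (stream.incrementToken()) {
                System.out.println(term.toString()); // "hello world" as a single token
            }
            stream.end();
        }
        analyzer.close();
    }
}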
- src/test/java/org/codelibs/opensearch/extension/analysis/NGramSynonymTokenizerFactory.java
 * either express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 */
package org.codelibs.opensearch.extension.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.opensearch.common.settings.Settings;
import org.opensearch.env.Environment;
import org.opensearch.index.IndexSettings;
import org.opensearch.index.analysis.AbstractTokenizerFactory;

/**
Registered: Fri Nov 08 09:08:12 UTC 2024 - Last Modified: Thu Feb 22 01:36:54 UTC 2024 - 2.4K bytes - Viewed (0)
- src/cmd/asm/internal/asm/line_test.go
    })
}

func testBadInstParser(t *testing.T, goarch string, tests []badInstTest) {
    for i, test := range tests {
        arch, ctxt := setArch(goarch)
        tokenizer := lex.NewTokenizer("", strings.NewReader(test.input+"\n"), nil)
        parser := NewParser(ctxt, arch, tokenizer)
        err := tryParse(t, func() { parser.Parse() })
        switch {
        case err == nil:
Registered: Tue Nov 05 11:13:11 UTC 2024 - Last Modified: Tue Aug 29 07:48:38 UTC 2023 - 1.9K bytes - Viewed (0)