Results 21 - 30 of 42 for tokenizer (0.05 sec)
src/main/resources/fess_indices/_cloud/fess.json
"type": "custom", "tokenizer": "standard", "filter": [ "truncate20_filter", "lowercase", "arabic_stop", "arabic_normalization", "arabic_keywords", "arabic_override", "arabic_stemmer" ] }, "armenian_analyzer": { "tokenizer": "standard", "filter": [
Registered: Thu Sep 04 12:52:25 UTC 2025 - Last Modified: Sat Feb 27 09:26:16 UTC 2021 - 117.3K bytes - Viewed (0) -
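This arabic_analyzer chains the standard tokenizer with truncation, lowercasing, stop-word removal, normalization, keyword protection, stemmer overrides, and stemming. To check what tokens such a chain actually emits, it can be exercised through the _analyze API of the OpenSearch node that loaded fess.json. The sketch below uses only the JDK's HttpClient; the host, port, and index name are assumptions, so adjust them to the actual deployment.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class AnalyzeApiSketch {
        public static void main(String[] args) throws Exception {
            // Ask the node which tokens "arabic_analyzer" produces for a sample string.
            // Host, port, and index name ("fess") are assumptions for illustration.
            String body = "{\"analyzer\":\"arabic_analyzer\",\"text\":\"مرحبا بالعالم\"}";
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://localhost:9200/fess/_analyze"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // JSON list of tokens with offsets and positions
        }
    }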
README.md
    String escaped = JsonUtil.escape("Hello \"World\" with special chars");
    String unescaped = JsonUtil.unescape(escaped);

    // Text tokenization
    Tokenizer tokenizer = new Tokenizer("field1,field2,field3", ",");
    while (tokenizer.hasMoreTokens()) {
        String token = tokenizer.nextToken();
        // Process each token
    }

    // Decimal formatting
    DecimalFormat format = DecimalFormatUtil.getDecimalFormat("###,###.00");
Registered: Fri Sep 05 20:58:11 UTC 2025 - Last Modified: Sun Aug 31 02:56:02 UTC 2025 - 12.7K bytes - Viewed (0)
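The Tokenizer in this README is a CodeLibs utility whose import lies outside the excerpt, but the loop follows the familiar hasMoreTokens()/nextToken() contract also exposed by the JDK's java.util.StringTokenizer. A self-contained sketch of that contract, deliberately using the JDK class instead of the library class so the example compiles on its own:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.StringTokenizer;

    public class TokenizerContractSketch {
        public static void main(String[] args) {
            // Split a comma-separated field list the same way the README loop does.
            List<String> fields = new ArrayList<>();
            StringTokenizer tokenizer = new StringTokenizer("field1,field2,field3", ",");
            while (tokenizer.hasMoreTokens()) {
                fields.add(tokenizer.nextToken());
            }
            System.out.println(fields); // [field1, field2, field3]
        }
    }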
src/test/java/org/codelibs/fess/suggest/analysis/SuggestAnalyzerTest.java
    List<AnalyzeToken> tokens = analyzer.analyzeAndReading(text, field, lang);
    assertNotNull(tokens);
    assertEquals(2, tokens.size()); // "Test123" and "ABC-456" (hyphen is not a split char in our tokenizer)
    assertEquals("Test123", tokens.get(0).getTerm());
    assertEquals("ABC-456", tokens.get(1).getTerm());
    assertEquals("TEST123", ((TestAnalyzeToken) tokens.get(0)).getReading());
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Mon Sep 01 13:33:03 UTC 2025 - 15.7K bytes - Viewed (0)
src/main/resources/fess_indices/fess_config.file_config.json
"number_of_shards": 1, "number_of_replicas": 0, "auto_expand_replicas": "0-1" }, "analysis": { "analyzer": { "standard_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "cjk_width", "asciifolding", "lowercase", "stop", "stemmer" ] } } } }
Registered: Thu Sep 04 12:52:25 UTC 2025 - Last Modified: Thu Dec 02 13:14:56 UTC 2021 - 484 bytes - Viewed (0) -
src/main/resources/fess_indices/fess_config.web_config.json
"number_of_shards": 1, "number_of_replicas": 0, "auto_expand_replicas": "0-1" }, "analysis": { "analyzer": { "standard_analyzer": { "type": "custom", "tokenizer": "standard", "filter": [ "cjk_width", "asciifolding", "lowercase", "stop", "stemmer" ] } } } }
Registered: Thu Sep 04 12:52:25 UTC 2025 - Last Modified: Thu Dec 02 13:14:56 UTC 2021 - 484 bytes - Viewed (0) -
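Both file_config and web_config define the same standard_analyzer: a custom analyzer that runs the standard tokenizer and then the cjk_width, asciifolding, lowercase, stop, and stemmer filters. The sketch below builds a comparable chain directly with Lucene's CustomAnalyzer (the layer OpenSearch analyzers sit on) and prints the tokens it produces; mapping the generic "stemmer" filter to PorterStemFilterFactory and the lucene-analysis-common classpath are assumptions, so treat this as a sketch rather than the exact server-side chain.

    import org.apache.lucene.analysis.Analyzer;
    import org.apache.lucene.analysis.TokenStream;
    import org.apache.lucene.analysis.cjk.CJKWidthFilterFactory;
    import org.apache.lucene.analysis.core.LowerCaseFilterFactory;
    import org.apache.lucene.analysis.core.StopFilterFactory;
    import org.apache.lucene.analysis.custom.CustomAnalyzer;
    import org.apache.lucene.analysis.en.PorterStemFilterFactory;
    import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory;
    import org.apache.lucene.analysis.standard.StandardTokenizerFactory;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

    public class StandardAnalyzerChainSketch {
        public static void main(String[] args) throws Exception {
            // Mirror the JSON definition: standard tokenizer, then the filter chain in order.
            try (Analyzer analyzer = CustomAnalyzer.builder()
                        .withTokenizer(StandardTokenizerFactory.class)
                        .addTokenFilter(CJKWidthFilterFactory.class)
                        .addTokenFilter(ASCIIFoldingFilterFactory.class)
                        .addTokenFilter(LowerCaseFilterFactory.class)
                        .addTokenFilter(StopFilterFactory.class)
                        .addTokenFilter(PorterStemFilterFactory.class) // assumed stand-in for "stemmer"
                        .build();
                 TokenStream ts = analyzer.tokenStream("content", "The Tokenizers were running")) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    System.out.println(term.toString()); // one analyzed term per line
                }
                ts.end();
            }
        }
    }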
compat/maven-artifact/src/main/java/org/apache/maven/artifact/versioning/DefaultArtifactVersion.java
Registered: Sun Sep 07 03:35:12 UTC 2025 - Last Modified: Fri Jun 06 14:28:57 UTC 2025 - 6.1K bytes - Viewed (0)
src/test/java/org/codelibs/opensearch/extension/analysis/NGramSynonymTokenizer.java
    */
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;
    import java.util.PriorityQueue;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.synonym.SynonymMap;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
Registered: Fri Sep 19 09:08:11 UTC 2025 - Last Modified: Sat Mar 15 06:51:20 UTC 2025 - 17K bytes - Viewed (0)
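NGramSynonymTokenizer is a custom Lucene Tokenizer, and the CharTermAttribute / OffsetAttribute imports above point at the standard attribute-based way any such tokenizer is consumed. A minimal sketch of that consumption pattern, using Lucene's stock WhitespaceTokenizer as a stand-in for the custom class (the stand-in, class name, and sample text are assumptions):

    import java.io.IOException;
    import java.io.StringReader;

    import org.apache.lucene.analysis.Tokenizer;
    import org.apache.lucene.analysis.core.WhitespaceTokenizer;
    import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
    import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

    public class TokenizerConsumerSketch {
        public static void main(String[] args) throws IOException {
            // The same reset/incrementToken/end lifecycle applies to any Lucene Tokenizer,
            // including custom ones such as NGramSynonymTokenizer.
            try (Tokenizer tokenizer = new WhitespaceTokenizer()) {
                tokenizer.setReader(new StringReader("n-gram synonym tokenizer"));
                CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
                OffsetAttribute offset = tokenizer.addAttribute(OffsetAttribute.class);
                tokenizer.reset();
                while (tokenizer.incrementToken()) {
                    System.out.println(term.toString() + " [" + offset.startOffset() + ", " + offset.endOffset() + ")");
                }
                tokenizer.end();
            }
        }
    }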
src/cmd/asm/internal/lex/input.go
            if i > 0 {
                name, value = name[:i], name[i+1:]
            }
            tokens := Tokenize(name)
            if len(tokens) != 1 || tokens[0].ScanToken != scanner.Ident {
                fmt.Fprintf(os.Stderr, "asm: parsing -D: %q is not a valid identifier name\n", tokens[0])
                flags.Usage()
            }
            macros[name] = &Macro{
                name:   name,
                args:   nil,
                tokens: Tokenize(value),
            }
        }
        return macros
    }

    var panicOnError bool // For testing.
Registered: Tue Sep 09 11:13:09 UTC 2025 - Last Modified: Fri Sep 06 13:17:27 UTC 2024 - 12.5K bytes - Viewed (0)
src/cmd/asm/internal/asm/pseudo_test.go
    package asm

    import (
        "strings"
        "testing"

        "cmd/asm/internal/lex"
    )

    func tokenize(s string) [][]lex.Token {
        res := [][]lex.Token{}
        if len(s) == 0 {
            return res
        }
        for _, o := range strings.Split(s, ",") {
            res = append(res, lex.Tokenize(o))
        }
        return res
    }

    func TestErroneous(t *testing.T) {
        type errtest struct {
            pseudo   string
            operands string
Registered: Tue Sep 09 11:13:09 UTC 2025 - Last Modified: Tue Aug 29 07:48:38 UTC 2023 - 3.1K bytes - Viewed (0)
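The tokenize helper above splits a comma-separated operand string and runs each piece through the assembler's lexer, which keeps the test tables compact. As a loose JDK parallel of that helper shape (not the Go assembler's lexer; the class name and sample input are made up for illustration), the same idea can be written around java.io.StreamTokenizer:

    import java.io.IOException;
    import java.io.StreamTokenizer;
    import java.io.StringReader;
    import java.util.ArrayList;
    import java.util.List;

    public class OperandTokenizeSketch {
        // Split a comma-separated operand list and tokenize each piece,
        // mirroring the shape of the Go test helper above.
        static List<List<String>> tokenize(String s) throws IOException {
            List<List<String>> res = new ArrayList<>();
            if (s.isEmpty()) {
                return res;
            }
            for (String operand : s.split(",")) {
                List<String> tokens = new ArrayList<>();
                StreamTokenizer st = new StreamTokenizer(new StringReader(operand));
                while (st.nextToken() != StreamTokenizer.TT_EOF) {
                    if (st.ttype == StreamTokenizer.TT_WORD) {
                        tokens.add(st.sval);
                    } else if (st.ttype == StreamTokenizer.TT_NUMBER) {
                        tokens.add(String.valueOf(st.nval));
                    } else {
                        tokens.add(String.valueOf((char) st.ttype));
                    }
                }
                res.add(tokens);
            }
            return res;
        }

        public static void main(String[] args) throws IOException {
            System.out.println(tokenize("R1, 8(R2)"));
        }
    }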
src/cmd/asm/internal/asm/expr_test.go
    }

    func TestExpr(t *testing.T) {
        p := NewParser(nil, nil, nil) // Expression evaluation uses none of these fields of the parser.
        for i, test := range exprTests {
            p.start(lex.Tokenize(test.input))
            result := int64(p.expr())
            if result != test.output {
                t.Errorf("%d: %q evaluated to %d; expected %d", i, test.input, result, test.output)
            }
            tok := p.next()
Registered: Tue Sep 09 11:13:09 UTC 2025 - Last Modified: Tue Aug 29 07:48:38 UTC 2023 - 3.2K bytes - Viewed (0)