- Sort Score
- Result 10 results
- Languages All
Results 151 - 160 of 1,018 for indices (1.25 sec)
-
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
} (*types)[index] = n->output_type(0); } } return absl::OkStatus(); } // Renumber the indices of _Arg nodes in a graph, according to // 'permutation' that maps old indices to new indices. static Status RenumberArguments(Graph* graph, const std::vector<int>& permutation) { for (Node* n : graph->op_nodes()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 51K bytes - Viewed (0) -
src/cmd/compile/internal/types2/typestring.go
tparams *TypeParamList // local type parameters paramNames bool // if set, write function parameter names, otherwise, write types only tpSubscripts bool // if set, write type parameter indices as subscripts pkgInfo bool // package-annotate first unexported-type field to avoid confusing type description } func newTypeWriter(buf *bytes.Buffer, qf Qualifier) *typeWriter {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:01:18 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
// new argument index that corresponds to each original index (-1 means it is // removed). If remaining_resource_data_types is provided, it will store the // data types of the remaining resource arguments, where the indices are after // removing unused ones. void RemoveUnusedResourceArgumentsAndForwardedRetvals( const llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>& infos, func::FuncOp func_op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
src/regexp/all_test.go
} } } type subexpIndex struct { name string index int } type subexpCase struct { input string num int names []string indices []subexpIndex } var emptySubexpIndices = []subexpIndex{{"", -1}, {"missing", -1}} var subexpCases = []subexpCase{ {``, 0, nil, emptySubexpIndices}, {`.*`, 0, nil, emptySubexpIndices},
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Mar 25 19:36:03 UTC 2024 - 25.8K bytes - Viewed (0) -
src/cmd/link/internal/ld/pcln.go
for ; p < q; p += SUBBUCKETSIZE { i = int((p - min) / SUBBUCKETSIZE) if indexes[i] > idx { indexes[i] = idx } } i = int((q - 1 - min) / SUBBUCKETSIZE) if indexes[i] > idx { indexes[i] = idx } idx++ } // fill in table for i := int32(0); i < nbuckets; i++ { base := indexes[i*SUBBUCKETS] if base == NOIDX { Errorf(nil, "hole in findfunctab") }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Feb 21 22:16:54 UTC 2024 - 29.6K bytes - Viewed (0) -
src/compress/bzip2/bzip2.go
} // The Huffman tree can switch every 50 symbols so there's a list of // tree indexes telling us which tree to use for each 50 symbol block. numSelectors := br.ReadBits(15) treeIndexes := make([]uint8, numSelectors) // The tree indexes are move-to-front transformed and stored as unary // numbers. mtfTreeDecoder := newMTFDecoderWithRange(numHuffmanTrees)
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri Apr 26 13:32:40 UTC 2024 - 13K bytes - Viewed (0) -
src/go/types/typestring.go
tparams *TypeParamList // local type parameters paramNames bool // if set, write function parameter names, otherwise, write types only tpSubscripts bool // if set, write type parameter indices as subscripts pkgInfo bool // package-annotate first unexported-type field to avoid confusing type description } func newTypeWriter(buf *bytes.Buffer, qf Qualifier) *typeWriter {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 23 03:01:18 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
if (lstm_func.getNumArguments() == 7) return failure(); // We should know the batch size in advance for the lstm fusion. // A good indicator of batch size is both cell state and input state (indices // 1 & 2) have fixed shape and other input tensors should have ranked tensor // types. for (int i = 0; i < 6; ++i) { auto input = lstm_func.getArgument(i);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
assert(output_axis < num_dimensions); const int input_axis = perm[output_axis]; for (int i = 0; i < output_shape[output_axis]; ++i) { // Update the input indices on `input_axis`. assert(input_axis < input_indices->size()); input_indices->operator[](input_axis) = static_cast<uint64_t>(i); // Write the value from `input_tensor` if it is the last axis or
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
padded_shape[i] += padding; } return padded_shape; } std::pair<int64_t, int64_t> GetDimSize( const ArrayRef<int64_t> shape, const ArrayRef<int64_t> indexes) const { return {shape[indexes[0]], shape[indexes[1]]}; } bool IsTransposeConv( stablehlo::ConvolutionOp op, stablehlo::ConvDimensionNumbersAttr dimension_numbers) const {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0)