- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 15 for output_types (0.21 sec)
-
tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization.td
def FuseMapAndBatch : Pat< (TF_BatchDatasetV2Op (TF_MapDatasetOp $input_dataset, $other_arguments, $f, $output_types, $output_shapes, $use_inter_op_parallelism, $preserve_cardinality, $force_synchronous, $map_dataset_metadata), $batch_size, $drop_remainder, $parallel_copy, $batch_output_types,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 1.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/decompose_optionals.mlir
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
def TF_AnonymousIteratorOp : TF_Op<"AnonymousIterator", [TF_UniqueResourceAllocation]> { let summary = "A container for an iterator resource."; let arguments = (ins ConfinedAttr<TypeArrayAttr, [ArrayMinCount<1>]>:$output_types, ConfinedAttr<TF_ShapeAttrArray, [ArrayMinCount<1>]>:$output_shapes ); let results = (outs Res<TF_ResourceTensor, [{A handle to the iterator that can be passed to a "MakeIterator" or
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
%1 = "tf.ReduceDataset"(%arg0, %arg1) { Targuments = [], Tstate = [i64], device = "", f = @__reduce_func_1, f._tf_data_function = true, output_shapes = [#tf_type.shape<>], output_types = [i64], use_inter_op_parallelism = true, _xla_compile_device_type="TPU"} : (tensor<!tf_type.variant>, tensor<i64>) -> (tensor<i64>) func.return } ```
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:18:05 UTC 2024 - 99.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
} bool changed = false; int next_op_result = 0; for (auto output_type : main_output_types) { if (tensorflow::IsTokenType(output_type)) continue; auto output_type_ranked = mlir::dyn_cast<RankedTensorType>(output_type); if (output_type_ranked == nullptr) { llvm::errs() << "Unsupported XlaCallModule result type: " << output_type << "\n"; return false; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0) -
platforms/core-runtime/stdlib-java-extensions/src/main/java/org/gradle/internal/Cast.java
* * @param outputType The type to cast the input to * @param object The object to be cast (must not be {@code null}) * @param <O> The type to be cast to * @param <I> The type of the object to be cast * @return The input object, cast to the output type */ public static <O, I> O cast(Class<O> outputType, I object) { try {
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Jun 10 14:28:48 UTC 2024 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.h
// to the `output_buffer`. Both `model_buffer` and `output_buffer` should be a // valid FlatBuffer format for Model supported by TFLite. // // The `input_type`, `output_type` and `inference_type` can be float32 / qint8 / // int8 / int16. // // Returns a partially quantized model if `fully_quantize` is false. Returns a // non-OK status if the quantization fails. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model.cc
<< ", output_inference_type: " << tflite::EnumNameTensorType(output_type) << "\n"; mlir::Builder mlir_builder(&context); mlir::Type input_mlir_type = tflite::ConvertElementType(input_type, mlir_builder); mlir::Type output_mlir_type = tflite::ConvertElementType(output_type, mlir_builder); if (fully_quantize) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 6.3K bytes - Viewed (0) -
cluster/images/etcd/Makefile
push: build # split words on hyphen, access by 1-index word-hyphen = $(word $2,$(subst -, ,$1)) sub-build-%: $(MAKE) OUTPUT_TYPE=docker OS=$(call word-hyphen,$*,1) ARCH=$(call word-hyphen,$*,2) build all-build: $(addprefix sub-build-,$(ALL_OS_ARCH)) sub-push-image-%: $(MAKE) OUTPUT_TYPE=registry OS=$(call word-hyphen,$*,1) ARCH=$(call word-hyphen,$*,2) OSVERSION=$(call word-hyphen,$*,3) REGISTRY=$(PUSH_REGISTRY) push
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Thu Jun 06 16:13:15 UTC 2024 - 11.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
return kTfLiteOk; } TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type, const TensorType& output_type, bool allow_float, std::string& output_buffer) { return QuantizeModel(model, input_type, output_type, allow_float, /*operator_names=*/{}, TensorType_INT8, output_buffer); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0)