- Sort: Score
- Results per page: 10
- Languages: All
Results 11 - 18 of 18 for F32 (0.04 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
// * Input tensors are per-tensor uniform quantized (i8->f32) // tensors (full integer) with shape [..., r_x, c_x] or [..., c_x, r_x]. // * The filter tensor is a per-tensor uniform quantized (i8->f32) tensor // (constant or activation) with shape [..., r_y, c_y] or [..., c_y, r_y]. // * Output tensors are per-tensor uniform quantized (i8->f32) or // per-channel uniform quantized (i32->f32) tensors. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
llvm::function_ref<APFloat(APFloat, APFloat)> float_calculate, llvm::function_ref<APInt(APInt, APInt)> int_calculate) { // Note: All types are wrapped in tensor types in TFlite. E.g., f32 is // represented as tensor<f32>. So we are only handling tensor types here. auto type = result_type.dyn_cast<ShapedType>(); if (!type) return {}; auto elemType = type.getElementType(); if (elemType.isa<FloatType>())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize.cc
// TODO(b/180752069): Figure out new bias' type when old bias is empty. return failure(); } // The FC relies on constant folding, which is implemented on F32. Checks // types to be F32. { if (!IsF32Value(add_op.getRhs()) || !IsF32Value(fc_op.getFilter()) || !IsF32Value(old_bias)) return failure(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 102.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/extract_outside_compilation.mlir
"tf_device.cluster"() ({ "tf.IfRegion"(%pred) ({ %wtensor = "tf.Const"() {device = "", value = dense<0.0> : tensor<f32>} : () -> tensor<f32> "tf.WriteSummary"(%writer, %step, %wtensor, %tag, %wmetadata) {_xla_outside_compilation = "auto"} : (tensor<*x!tf_type.resource>, tensor<i64>, tensor<f32>, tensor<!tf_type.string>, tensor<!tf_type.string>) -> ()
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:59:10 UTC 2023 - 129.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_rewrite.mlir
// device_coordinates: 1 // The following OpSharding is used for TPU computation inputs in below test: // Proto debug string: // input 0 // type: OTHER // tile_shape { // element_type: F32 // dimensions: 2 // dimensions: 2 // layout { // minor_to_major: 1 // minor_to_major: 0 // format: DENSE // } // is_dynamic_dimension: false
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 22:03:30 UTC 2024 - 172.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py
sess, tags=tags, export_dir=self._output_saved_model_path ) # The graph should contain a quantized function call (it contains a # single f32 matmul node). self.assertTrue( self._contains_quantized_function_call( output_meta_graph_def.graph_def ) ) self.assertCountEqual(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 235.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
// is up to ~2x faster. const bool is_f16 = input_ty.getElementType().isF16(); if (is_f16 && CanUseTensorCores(devices)) return "NHWC"; // For f32/f16 data type decision depends on the filter size in spatial // dimensions, for other data types we keep current data format. if (!input_ty.getElementType().isF32() && !input_ty.getElementType().isF16())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0)