- Sort by: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 53 for 1xi8 (0.04 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
} // For weight-only func.func @dequantize_i8(%input : tensor<*xi8>, %scale : tensor<*xf32>, %zp : tensor<*xi32>) -> tensor<*xf32> { // Use identity op to avoid the weight being constant-folded. %identity = "tf.Identity"(%input) : (tensor<*xi8>) -> tensor<*xi8> %input_i32 = "tf.Cast"(%identity) : (tensor<*xi8>) -> tensor<*xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
%i8 = "tf.Cast"(%round) : (tensor<*xf32>) -> tensor<*xi8> func.return %i8 : tensor<*xi8> } func.func @dequantize_i8(%input : tensor<*xi8>, %scale : tensor<*xf32>, %zp : tensor<*xi32>) -> tensor<*xf32> { // Use identity op to avoid the weight being constant-folded. %identity = "tf.Identity"(%input) : (tensor<*xi8>) -> tensor<*xi8> %input_i32 = "tf.Cast"(%identity) : (tensor<*xi8>) -> tensor<*xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 30.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/propagate_quantize_type.mlir
return %3 : tensor<1x2x2x1024xbf16> } func.func private @composite_dequantize_uniform(%arg0: tensor<*xi8>) -> tensor<*xbf16> { %cst = "tf.Const"() {value = dense<1.574710e-02> : tensor<bf16>} : () -> tensor<bf16> %0 = "tf.Cast"(%arg0) {Truncate = false} : (tensor<*xi8>) -> tensor<*xbf16> %1 = "tf.Mul"(%0, %cst) : (tensor<*xbf16>, tensor<bf16>) -> tensor<*xbf16> return %1 : tensor<*xbf16> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/arg-multi-data-type-with-subtype.pbtxt
tensor<i8>, tensor<f64>, tensor<!tf_type.resource>) # CHECK-SUBTYPE: func @main(%arg0: tensor<10xi32>, %arg1: tensor<!tf_type.variant<tensor<10xf32>>>, %arg2: tensor<10xi8>, %arg3: tensor<10xf64>, %arg4: tensor<!tf_type.resource<tensor<10xi32>>>) -> (tensor<10xi32>, tensor<!tf_type.variant<tensor<10xf32>>>, tensor<10xi8>, tensor<10xf64>, tensor<!tf_type.resource<tensor<10xi32>>>)...
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 07 18:11:42 UTC 2022 - 2.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
// CHECK: %[[maxpool_i8:.*]] = "tf.MaxPool"(%[[sc1]]) // CHECK-SAME: (tensor<*xi8>) -> tensor<*xi8> // CHECK: %[[reshape_i8:.*]] = "tf.Reshape"(%[[maxpool_i8]] // CHECK-SAME: (tensor<*xi8>, tensor<2xi32>) -> tensor<*xi8> // CHECK: %[[sc2:.*]] = "quantfork.scast"(%[[reshape_i8]]) // CHECK: %[[dq:.*]] = "quantfork.dcast"(%[[sc2]]) : (tensor<*x!quant.uniform<i8:f32, 5.000000e-02:-10>>)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir
%19 = "tf.BroadcastTo"(%4, %17) : (tensor<2x?x?xi8>, tensor<3xi64>) -> tensor<2x?x?xi8> %20 = "tf.BroadcastTo"(%9, %18) : (tensor<2x?x?xi8>, tensor<3xi64>) -> tensor<2x?x?xi8> %21 = "tf.XlaDotV2"(%19, %20) {dimension_numbers = "\22\01\00\1A\01\00\12\01\01\0A\01\02", precision_config = ""} : (tensor<2x?x?xi8>, tensor<2x?x?xi8>) -> tensor<2x?x?xi32> %22 = "tf.Cast"(%19) {Truncate = false} : (tensor<2x?x?xi8>) -> tensor<2x?x?xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 81K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/functional-control-flow-to-cfg.mlir
} func.func private @testIf3Then(tensor<*xf32>) -> (tensor<*xf32>, tensor<*xi8>, tensor<*xbf16>) func.func private @testIf3Else(tensor<*xf32>) -> (tensor<*xf32>, tensor<*xi8>, tensor<*xbf16>) // CHECK-LABEL: func @testIf3Result(%arg0: tensor<i1>, %arg1: tensor<*xf32>) func.func @testIf3Result(tensor<i1>, tensor<*xf32>) -> (tensor<*xf32>, tensor<*xi8>, tensor<*xbf16>) { ^bb0(%arg0: tensor<i1>, %arg1: tensor<*xf32>):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir
// CHECK-SAME: (tensor<1x2x2x3xi8>, tensor<2x2x3x2xi8>, tensor<2xi32>, tensor<f32>, tensor<i32>, tensor<2xf32>, tensor<2xi32>, tensor<2xf32>, tensor<2xi32>, tensor<f32>, tensor<i32>) -> tensor<*xi8> // CHECK: %[[cast_1:.*]] = "tf.Cast"(%[[conv_quant]]) <{Truncate = false}> : (tensor<*xi8>) -> tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Nov 06 01:23:21 UTC 2023 - 15.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir
func.return %11 : tensor<*xf32> } // CHECK: %[[maxpool_i8:.*]] = "tf.MaxPool" // CHECK-SAME: (tensor<*xi8>) -> tensor<*xi8> // CHECK: %[[reshape_i8:.*]] = "tf.Reshape"(%[[maxpool_i8]] // CHECK-SAME: (tensor<*xi8>, tensor<2xi32>) -> tensor<*xi8> // CHECK: %[[scast:.*]] = "quantfork.scast"(%[[reshape_i8]] // CHECK: %[[matmul:.*]] = "tf.PartitionedCall"(%[[scast]] // CHECK-SAME: f = @composite_matmul_fn_1
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/tests/raise_to_tf.mlir
func.func @fixed_element_attribute_invalid(%arg0: tensor<2xf32>) -> tensor<2xi8> { %0 = "tfr.cast"(%arg0) : (tensor<2xf32>) -> !tfr.tensor // expected-error@+1 {{type i8_ can't be resolved for the signature of the op}} %1 = tfr.call @tf__invalid_type_op(%0) : (!tfr.tensor) -> !tfr.tensor %2 = "tfr.cast"(%1) : (!tfr.tensor) -> tensor<2xi8> func.return %2 : tensor<2xi8> // CHECK: tfr.call @tf__invalid_type_op
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 6.7K bytes - Viewed (0)