- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 269 for scast (0.32 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize.mlir
func.return %4 : tensor<*xf32> } // CHECK: %[[q:.*]] = "quantfork.qcast"(%arg0) // CHECK: %[[sc1:.*]] = "quantfork.scast"(%[[q]]) : (tensor<*x!quant.uniform<i8:f32, 5.000000e-02:-10>>) // CHECK: %[[fcast:.*]] = "tf.Cast"(%[[sc1]]) <{Truncate = false}> : (tensor<*xi8>) -> tensor<*xf32> // CHECK: %[[avgpool_f32:.*]] = "tf.AvgPool"(%[[fcast]]) // CHECK-SAME: (tensor<*xf32>) -> tensor<*xf32> // CHECK: %[[round:.*]] = "tf.Round"(%[[avgpool_f32]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 6.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.cc
addOperations< #define GET_OP_LIST #include "tensorflow/compiler/mlir/quantization/common/ir/QuantOps.cc.inc" >(); } OpFoldResult StorageCastOp::fold(FoldAdaptor) { // Matches x -> [scast -> scast] -> y, replacing the second scast with the // value of x if the casts invert each other. auto srcScastOp = getArg().getDefiningOp<StorageCastOp>(); if (!srcScastOp || srcScastOp.getArg().getType() != getType())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.cc
addOperations< #define GET_OP_LIST #include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.cc.inc" >(); } OpFoldResult StorageCastOp::fold(FoldAdaptor) { // Matches x -> [scast -> scast] -> y, replacing the second scast with the // value of x if the casts invert each other. auto srcScastOp = getArg().getDefiningOp<StorageCastOp>(); if (!srcScastOp || srcScastOp.getArg().getType() != getType())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize.mlir
// CHECK: %[[maxpool:.*]] = "tf.MaxPool" // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[maxpool]]) // CHECK-SAME: quant.uniform<i8:f32, 5.000000e-02:-10> // CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]]) // CHECK-SAME: quant.uniform<i8:f32, 5.000000e-02:-10> // CHECK: %[[reshape:.*]] = "tf.Reshape"(%[[dq1]] // CHECK: %[[q2:.*]] = "quantfork.qcast"(%[[reshape]]) // CHECK-SAME: quant.uniform<i8:f32, 5.000000e-02:-10>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 29 02:42:57 UTC 2022 - 2.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir
// CHECK: %[[q0:.*]] = "quantfork.qcast"(%arg0) // CHECK: %[[dq0:.*]] = "quantfork.dcast"(%[[q0]]) // CHECK-SAME: quant.uniform<i8:f32, 0.010039215461880554:-1> // CHECK: %[[maxpool:.*]] = "tf.MaxPool"(%[[dq0]]) // CHECK: %[[q1:.*]] = "quantfork.qcast"(%[[maxpool]]) // CHECK-SAME: quant.uniform<i8:f32, 0.010039215461880554:-1> // CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 01 10:21:29 UTC 2023 - 9.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/convert_fake_quant_to_qdq.mlir
} // CHECK: %[[q1:.*]] = "quantfork.qcast"(%arg0) // CHECK-SAME: tensor<3x!quant.uniform<i8:f32, 0.0076314610593459188:-3>> // CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]]) // CHECK: %[[q2:.*]] = "quantfork.qcast"(%arg1) // CHECK-SAME: tensor<4x3x!quant.uniform<i8<-127:127>:f32:1, {0.003937007874015748,0.0039370079913477263:-25,0.003937007874015748:51}>> // CHECK: %[[dq2:.*]] = "quantfork.dcast"(%[[q2]])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 24 07:02:54 UTC 2022 - 2.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq_per_channel.mlir
// CHECK: %[[dq1:.*]] = "quantfork.dcast"(%[[q1]]) // CHECK: %[[q2:.*]] = "quantfork.qcast"(%arg0) // CHECK-SAME: tensor<1x3x4x3x!quant.uniform<i8:f32, 0.58754816990272674:-128>> // CHECK: %[[dq2:.*]] = "quantfork.dcast"(%[[q2]]) // CHECK: %[[call:.*]] = "tf.PartitionedCall"(%[[dq2]], %[[dq1]], %[[dq0]]) // CHECK-SAME: f = @composite_conv2d_with_bias_and_relu6_fn_10 // CHECK: %[[q3:.*]] = "quantfork.qcast"(%[[call]]) {volatile}
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 01 10:21:29 UTC 2023 - 4.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/prepare_quantize/prepare_quantize_per_channel.mlir
// CHECK: %[[q_weight:.*]] = "quantfork.qcast" // CHECK-SAME: -> tensor<2x2x!quant.uniform<i8<-127:127>:f32:1, {0.049663885371891529,0.060200210631363035}>> // CHECK: %[[dq_weight:.*]] = "quantfork.dcast"(%[[q_weight]]) %cst = "tf.Const"() {device = "", value = dense<[[-6.30731344, 5.4962182], [1.80364347, -7.64542675]]> : tensor<2x2xf32>} : () -> tensor<2x2xf32> // CHECK: %[[q_act:.*]] = "quantfork.qcast"(%arg0)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 26 07:48:15 UTC 2024 - 8.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant_4bit.mlir
// CHECK: %[[fq:.*]] = "tf.FakeQuantWithMinMaxVarsPerChannel"(%arg0, %cst, %cst_0) // CHECK: %[[q:.*]] = "quantfork.qcast"(%[[fq]]) : (tensor<8x3xf32>) -> tensor<8x3x!quant.uniform<i4:f32:1, {1.000000e+00:-8,1.000000e+00:-7,1.000000e+00:-8}>> // CHECK: %[[dq:.*]] = "quantfork.dcast"(%[[q]]) // CHECK: return %[[dq]] } // CHECK-LABEL: fakeQuantForActivation
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/tf_to_quant.mlir
// CHECK: %[[fq:.*]] = "tf.FakeQuantWithMinMaxVarsPerChannel"(%arg0, %cst, %cst_0) // CHECK: %[[q:.*]] = "quantfork.qcast"(%[[fq]]) : (tensor<8x3xf32>) -> tensor<8x3x!quant.uniform<i8:f32:1, {1.000000e+00:-128,1.000000e+00:-127,1.000000e+00:-128}>> // CHECK: %[[dq:.*]] = "quantfork.dcast"(%[[q]]) // CHECK: return %[[dq]] } // CHECK-LABEL: fakeQuantForActivation
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 9.5K bytes - Viewed (0)