Results 41 - 50 of 144 for relu
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// CHECK: func private @quantized_conv2d_with_relu6_fn
// CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
// CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
// CHECK: func private @quantized_matmul_with_bias_fn
// CHECK: func private @quantized_matmul_with_bias_and_relu_fn
// CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
// CHECK: func private @quantized_matmul_fn
Last Modified: Tue Aug 29 01:13:58 UTC 2023 - 3.3K bytes
tensorflow/compiler/mlir/tfr/integration/node_expansion_test.py
sq = gen_composite_ops.my_biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])

def testWithKnownKernel(self):

  def biasd_dense_elu(x, y, z):
    dot = gen_composite_ops.my_biased_dense(x, y, z)
    return nn_ops.elu(dot)  # with known kernel, should not expand.

  t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 3.9K bytes
tensorflow/compiler/mlir/tfr/integration/graph_decompose_test.py
t3 = constant_op.constant([[-10.0, -10.0], [-10.0, -10.0]])
sq = biased_dense(t1, t2, t3, act='relu')
self.assertAllEqual(sq.numpy().reshape(-1), [0, 0, 5, 12])

def testWithKnownKernel(self):

  @def_function.function
  def biasd_dense_elu(x, y, z):
    dot = gen_composite_ops.my_biased_dense(x, y, z)
    return nn_ops.elu(dot)  # with known kernel, should not expand.

  t1 = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 3.2K bytes
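
Both test excerpts above assert the same flattened result. A minimal NumPy sketch of why relu(matmul(t1, t2) + t3) comes out as [0, 0, 5, 12], assuming t2 = [[1.0, 2.0], [3.0, 4.0]] (t2's value is not visible in either excerpt; t1 and t3 are taken from the snippets):

import numpy as np

t1 = np.array([[1.0, 2.0], [3.0, 4.0]])
t2 = np.array([[1.0, 2.0], [3.0, 4.0]])          # assumed; not shown in the excerpts
t3 = np.array([[-10.0, -10.0], [-10.0, -10.0]])

dot = t1 @ t2 + t3           # biased dense: [[-3., 0.], [5., 12.]]
out = np.maximum(dot, 0.0)   # act='relu':   [[0., 0.], [5., 12.]]
print(out.reshape(-1))       # [ 0.  0.  5. 12.]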
tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc
// tfl.Abs / tfl.Average_pool_2d / tfl.Cos / tfl.div / tfl.exp / tfl.hardswish /
// tfl.log / tfl.logistic / tfl.max_pool_2d / tfl.mirror_pad / tfl.maximum /
// tfl.custom / tfl.mean / tfl.minimum / tfl.pad / tfl.pow / tfl.prelu /
// tfl.relu / tfl.relu6 / tfl.rsqrt / tfl.sin / tfl.slice / tfl.softmax /
// tfl.space_to_depth / tfl.sqrt / tfl.square / tfl.squared_difference /
// tfl.strided_slice / tfl.tanh / tfl.transpose / tfl.transpose_conv
Last Modified: Tue Jun 06 03:08:33 UTC 2023 - 7.8K bytes
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir
// tf.MyCustomOp is the result of conversion to a Custom op
%2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
%3 = "tfl.exp"(%2) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
func.return %3 : tensor<4xf32>
Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 4.1K bytes
tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td
// These should match the ActivationFunctionType enum in TFLite schema.
def TFL_AFEnum_None  : I32EnumAttrCase<"NONE", 0>;
def TFL_AFEnum_Relu  : I32EnumAttrCase<"RELU", 1>;
def TFL_AFEnum_Relu1 : I32EnumAttrCase<"RELU_N1_TO_1", 2>;
def TFL_AFEnum_Relu6 : I32EnumAttrCase<"RELU6", 3>;
def TFL_AFEnum_Tanh  : I32EnumAttrCase<"TANH", 4>;
def TFL_AFEnum_Sign  : I32EnumAttrCase<"SIGN_BIT", 5>;

def TFL_AFAttr : TFL_AnyStrAttrOf<[
Last Modified: Thu Oct 20 00:05:24 UTC 2022 - 6.4K bytes
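
The enum cases name output clamps that TFLite fuses into the preceding op. A minimal NumPy sketch of those well-known semantics (apply_fused_activation is a hypothetical illustration, not a TFLite API; SIGN_BIT is omitted):

import numpy as np

# Hypothetical helper mirroring the fused-activation values in the .td excerpt.
def apply_fused_activation(x, act):
  if act == "NONE":
    return x
  if act == "RELU":
    return np.maximum(x, 0.0)     # clamp below at 0
  if act == "RELU_N1_TO_1":
    return np.clip(x, -1.0, 1.0)  # clamp to [-1, 1]
  if act == "RELU6":
    return np.clip(x, 0.0, 6.0)   # clamp to [0, 6]
  if act == "TANH":
    return np.tanh(x)
  raise ValueError("unsupported fused activation: " + act)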
tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir
%cst = arith.constant dense<0.0> : tensor<2x3xbf16>
%0 = "tfl.maximum"(%arg0, %cst) : (tensor<2x3xbf16>, tensor<2x3xbf16>) -> tensor<2x3xbf16>
func.return %0 : tensor<2x3xbf16>
// CHECK: %[[RESULT:.*]] = "tfl.relu"(%arg0)
// CHECK: return %[[RESULT]]
}

// CHECK-LABEL: fuseScalarAddIntoConv2dBf16
func.func @fuseScalarAddIntoConv2dBf16(%arg0: tensor<256x32x32x3xbf16>, %arg1: tensor<16x3x3x3xbf16>) -> tensor<256x8x7x16xbf16> {
Last Modified: Thu May 02 09:41:17 UTC 2024 - 5.8K bytes
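
The CHECK lines capture the rewrite this test verifies: tfl.maximum of a tensor with an all-zero constant is replaced by tfl.relu, which is sound because max(x, 0) and relu(x) are pointwise identical. A one-line NumPy confirmation of that identity:

import numpy as np

x = np.array([-2.0, -0.5, 0.0, 1.5, 7.0])
assert np.array_equal(np.maximum(x, 0.0), np.where(x > 0.0, x, 0.0))  # relu(x) == max(x, 0)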
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla.mlir
%3 = "tf.BiasAdd"(%2, %cst_0) {data_format = "NHWC", device = ""} : (tensor<1x3x2x2xf32>, tensor<2xf32>) -> tensor<1x3x2x2xf32> %4 = "tf.Relu"(%3) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32> %5 = "quantfork.qcast"(%4) : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2x!quant.uniform<i8:f32, 0.0027450981093387976:-19>>
Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 8.3K bytes
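
The qcast result type !quant.uniform<i8:f32, 0.0027450981093387976:-19> encodes an affine int8 quantization with scale s and zero point z = -19; the standard mapping is q = clamp(round(x / s) + z, -128, 127), with x recovered as s * (q - z). A NumPy sketch using the parameters from the type above:

import numpy as np

scale, zero_point = 0.0027450981093387976, -19  # from the !quant.uniform type

def quantize(x):
  return np.clip(np.round(x / scale) + zero_point, -128, 127).astype(np.int8)

def dequantize(q):
  return scale * (q.astype(np.float32) - zero_point)

x = np.array([0.0, 0.1, 0.3], dtype=np.float32)
q = quantize(x)        # e.g. 0.3 -> round(0.3 / 0.002745...) - 19 = 109 - 19 = 90
print(dequantize(q))   # approximately recovers x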
tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc
func_op.eraseResult(0);
func_op.insertResult(0, new_call_op.getResult(0).getType(),
                     /*resultAttrs=*/nullptr);
// Modify the quantized fused function to do dequantize+relu(6).
rewriter.setInsertionPoint(req_op);
Value new_result = rewriter.create<mlir::stablehlo::UniformDequantizeOp>(
    req_op.getLoc(), func_op.getResultTypes()[0], req_op.getOperand());
Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.9K bytes
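
Per the comment in the excerpt, the pass rewrites the fused function so it first dequantizes its int8 result and then applies the relu/relu6 clamp in float. A NumPy sketch of those numerics, with assumed scale and zero-point values (not taken from this pass):

import numpy as np

scale, zero_point = 0.05, 0  # assumed example parameters

def dequant_then_relu6(q):
  x = scale * (q.astype(np.float32) - zero_point)  # uniform dequantize
  return np.clip(x, 0.0, 6.0)                      # relu6 applied in float

print(dequant_then_relu6(np.array([-40, 10, 127], dtype=np.int8)))  # [0.  0.5 6. ]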
tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir
Last Modified: Wed May 08 00:18:59 UTC 2024 - 7.7K bytes