Results 11 - 20 of 66 for RELU
tensorflow/c/experimental/gradients/nn_grad_test.cc
using tensorflow::TF_StatusPtr;

Status ReluModel(AbstractContext* ctx,
                 absl::Span<AbstractTensorHandle* const> inputs,
                 absl::Span<AbstractTensorHandle*> outputs) {
  return ops::Relu(ctx, inputs[0], &outputs[0], "Relu");
}

Status SparseSoftmaxCrossEntropyWithLogitsModel(
    AbstractContext* ctx, absl::Span<AbstractTensorHandle* const> inputs,
    absl::Span<AbstractTensorHandle*> outputs) {
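ReluModel wraps ops::Relu so the gradient machinery can be exercised against it. As a rough illustration of what such a gradient test checks, the NumPy sketch below (illustrative only, not part of the test) compares the analytic Relu derivative with a central finite difference at a point away from zero:

```python
import numpy as np

# Illustrative only: compare the analytic Relu derivative (1 for x > 0,
# 0 for x < 0) against a central finite difference. x = 0 is avoided
# because Relu is not differentiable there.
def relu(x):
    return np.maximum(x, 0.0)

x, eps = 1.7, 1e-6
numeric = (relu(x + eps) - relu(x - eps)) / (2 * eps)
analytic = 1.0 if x > 0 else 0.0
assert abs(numeric - analytic) < 1e-5
```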
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir
parameters[
  {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "!tf_type.qint8"},
  {"quantized_ops": ["${main_op}", "BiasAdd", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "!tf_type.qint8"},
  {"quantized_ops": ["${main_op}", "BiasAdd", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "!tf_type.qint8"},
]
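Each parameter set pairs a list of fused ops with an activation function such as internal_requantize_and_relu_fn. As a hedged illustration of what "requantize and relu" amounts to numerically (plain NumPy, not the MLIR library itself; the scale and zero point are made up), Relu in the quantized int8 domain clamps values below the zero point, which represents real 0.0:

```python
import numpy as np

# Hypothetical quantization parameters, for illustration only.
scale, zero_point = 0.05, -10

real = np.array([-0.8, -0.1, 0.0, 0.4, 1.2], dtype=np.float32)
q = np.clip(np.round(real / scale) + zero_point, -128, 127).astype(np.int8)

q_relu = np.maximum(q, zero_point).astype(np.int8)       # quantized Relu
dequant = (q_relu.astype(np.float32) - zero_point) * scale
print(dequant)  # negative inputs flushed to 0.0, positives preserved
```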
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B"));
Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C"));
Node* d = ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D"));
Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E"));
ops::UnaryOp("Relu", e, builder.opts().WithName("F"));
tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_move_transposes_end.mlir
// CHECK: %[[TANH:[0-9]*]] = "tf.Tanh"(%arg0) {{.*}} tensor<1x4x4x8xf32>
// CHECK: %[[RELU:[0-9]*]] = "tf.Relu"(%[[TANH]]) {{.*}} tensor<1x4x4x8xf32>
// CHECK: %[[RES_TRANSPOSE:[0-9]*]] = "tf.Transpose"(%[[RELU]], %[[RES_PERM]])
// CHECK: return %[[RES_TRANSPOSE]]
%0 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
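The test expects the transpose to be emitted after the Tanh/Relu chain. The sketch below (NumPy, illustrative only) shows the property the pass relies on: elementwise ops commute with layout permutations, so applying Relu before or after the NHWC-to-NCHW transpose gives the same result.

```python
import numpy as np

x = np.random.randn(1, 4, 4, 8).astype(np.float32)
perm = (0, 3, 1, 2)  # NHWC -> NCHW

a = np.maximum(np.transpose(x, perm), 0.0)   # transpose first, then Relu
b = np.transpose(np.maximum(x, 0.0), perm)   # Relu first, transpose last
assert np.array_equal(a, b)                  # elementwise op commutes with layout change
```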
tensorflow/compiler/mlir/tensorflow/transforms/canonicalize.td
// Canonicalize tf.Maximum of zero to tf.Relu
//===----------------------------------------------------------------------===//

def IsInteger32Pred: CPred<
  "getElementTypeOrSelf($0.getType()).isInteger(32)">;

// Whether the transformation is compatible with the device if given.
// Currently, Relu with int32 is not supported on GPU.
def IsDeviceCompatible: Constraint<
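The pattern rewrites tf.Maximum(x, 0) into tf.Relu(x) when the device allows it (Relu on int32 is not supported on GPU). A minimal NumPy check of the identity the rewrite relies on, purely illustrative:

```python
import numpy as np

# The canonicalization rests on the identity max(x, 0) == relu(x).
def relu(x):
    return np.where(x > 0, x, 0.0)

x = np.array([-2.0, -0.5, 0.0, 1.5, 3.0], dtype=np.float32)
assert np.array_equal(np.maximum(x, 0.0), relu(x))
```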
tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir
// CHECK: %[[VAL_0:.*]] = "tf._FusedConv2D"(%arg2, %arg1, %arg0) <{data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 0.000000e+00 : f32, explicit_paddings = [], fused_ops = ["BiasAdd", "Relu"], num_args = 1 : i64, operandSegmentSizes = array<i32: 1, 1, 1, 0>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true}> {TArgs = [f32]} : (tensor<8x32x32x3xf32>, tensor<1x1x3x128xf32>, tensor<128xf32>) -> tensor<*xf32>
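The check verifies that Conv2D, BiasAdd and Relu collapse into a single tf._FusedConv2D node with fused_ops = ["BiasAdd", "Relu"]. For orientation, here is a hedged TensorFlow sketch of the unfused pattern the matcher looks for, using the same shapes as the test (illustrative only, not the matcher itself):

```python
import tensorflow as tf

# Unfused Conv2D -> BiasAdd -> Relu chain; shapes mirror the CHECK line above.
x = tf.random.normal([8, 32, 32, 3])
w = tf.random.normal([1, 1, 3, 128])
b = tf.random.normal([128])

y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
y = tf.nn.bias_add(y, b)
y = tf.nn.relu(y)
```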
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
{"quantized_ops": ["${main_op}", "Relu"], "act_func": "internal_requantize_and_relu_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}", "Relu6"], "act_func": "internal_requantize_and_relu6_fn", "output_type": "i8"}, {"quantized_ops": ["${main_op}"], "act_func": "internal_dequantize_no_activation_fn", "output_type": "f32"},
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
# If present the last op before return should be stablehlo.clamp for relu6
# and stablehlo.maximum for relu.
if activation_fn is nn_ops.relu6:
  self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
elif activation_fn is nn_ops.relu:
  self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
else:
  # Check activation functions are implicit.
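The test asserts that relu lowers to stablehlo.maximum and relu6 to stablehlo.clamp. As a plain-NumPy illustration of those lowerings (not the StableHLO ops themselves):

```python
import numpy as np

x = np.array([-3.0, 2.0, 7.5], dtype=np.float32)

relu = np.maximum(x, 0.0)      # what stablehlo.maximum expresses
relu6 = np.clip(x, 0.0, 6.0)   # what stablehlo.clamp expresses

print(relu)   # [0.  2.  7.5]
print(relu6)  # [0. 2. 6.]
```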
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc
%0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32> return %0 : tensor<1x4x4x3xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// CHECK: func private @quantized_conv2d_with_relu6_fn
// CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
// CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
// CHECK: func private @quantized_matmul_with_bias_fn
// CHECK: func private @quantized_matmul_with_bias_and_relu_fn
// CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
// CHECK: func private @quantized_matmul_fn