Results 91 - 100 of 401 for relu (0.12 sec)
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
      (BinBroadcastDimensions $one, $features))))>;

//===----------------------------------------------------------------------===//
// Relu op patterns.
//===----------------------------------------------------------------------===//

// TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
// require HLO canonicalization of min and max on a tensor to ClampOp.
Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes
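The TODO above hints at the lowering strategy: relu is an element-wise max against zero, and relu6 is the same max followed by a min against 6, i.e. a single clamp to [0, 6]. A minimal NumPy sketch of the math (illustration only, not the actual HLO lowering):

import numpy as np

def relu(x):
    # Element-wise max(0, x).
    return np.maximum(x, 0.0)

def relu6(x):
    # Equivalent to min(max(x, 0), 6): one clamp to [0, 6], which is
    # why the lowering wants min/max canonicalized into a ClampOp.
    return np.clip(x, 0.0, 6.0)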
tensorflow/compiler/mlir/lite/ir/tfl_ops.td
  let hasFolder = 1;
}

def TFL_ReluOp: TFL_Op<"relu", [
    PredOpTrait<"x and y must have same element type",
      TFL_TCresVTEtIsSameAsOp<0, 0>>,
    Pure,
    QuantizableResult,
    SameOperandsAndResultShape]> {
  let summary = "Relu operator";

  let description = [{
    Element-wise Relu operator
      x -> max(0, x)
  }];
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 186K bytes
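Since tfl.relu is element-wise and shape-preserving, it typically enters a model when a TensorFlow graph containing tf.nn.relu is converted. A rough sketch using the standard TFLite converter API (whether the relu stays standalone or gets fused into a preceding op depends on the surrounding graph):

import tensorflow as tf

# A minimal graph whose conversion exercises the tfl.relu op defined
# above (element-wise x -> max(0, x), same shape in and out).
@tf.function(input_signature=[tf.TensorSpec([4], tf.float32)])
def f(x):
    return tf.nn.relu(x)

converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [f.get_concrete_function()])
tflite_model = converter.convert()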
tensorflow/cc/gradients/nn_grad.cc
                       std::vector<Output>* grad_outputs) {
  auto dx = internal::ReluGrad(scope, grad_inputs[0], op.input(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Relu", ReluGradHelper);

Status Relu6GradHelper(const Scope& scope, const Operation& op,
                       const std::vector<Output>& grad_inputs,
                       std::vector<Output>* grad_outputs) {
Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes
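The registered gradient passes the incoming gradient through wherever the forward input was positive and zeroes it elsewhere. The same math in NumPy, for illustration:

import numpy as np

def relu_grad(dy, x):
    # Mirror of the Relu gradient above: dx = dy where x > 0, else 0.
    return dy * (x > 0).astype(dy.dtype)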
tensorflow/compiler/mlir/tensorflow/tests/canonicalize.mlir
Last Modified: Thu May 09 22:07:10 UTC 2024 - 132.1K bytes
tensorflow/compiler/mlir/lite/tests/ops.mlir
  // CHECK: "RELU"
  %1 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU"} : tensor<4xi32>
  // CHECK: "RELU_N1_TO_1"
  %2 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU_N1_TO_1"} : tensor<4xi32>
  // CHECK: "RELU6"
  %3 = tfl.add %arg0, %arg1 {fused_activation_function = "RELU6"} : tensor<4xi32>
  // CHECK: "TANH"
Last Modified: Thu Jun 06 19:09:08 UTC 2024 - 189.2K bytes
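The fused_activation_function attribute applies an activation to the op's result instead of emitting a separate activation op. A rough Python model of the attribute values this test exercises (illustrative names, not TFLite APIs):

import numpy as np

# How a fused activation behaves: the result of the binary op is
# post-processed in place rather than going through a second op.
FUSED_ACTIVATIONS = {
    "NONE": lambda y: y,
    "RELU": lambda y: np.maximum(y, 0),
    "RELU_N1_TO_1": lambda y: np.clip(y, -1, 1),
    "RELU6": lambda y: np.clip(y, 0, 6),
    "TANH": np.tanh,
}

def fused_add(a, b, activation="NONE"):
    return FUSED_ACTIVATIONS[activation](a + b)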
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
      dilations: Sequence[int] = (1, 1, 1, 1),
      padding: str = 'SAME',
  ):
    class DepthwiseConvModel(module.Module):
      """A simple model with a single depthwise conv2d, bias and relu."""

      def __init__(self):
        self.out_channel_size = filter_shape[2] * filter_shape[3]

        # This ensures filters will have different value range per out channel
        self.filters = np.stack(
Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes
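A sketch of the model shape this test builds, using public TF ops with illustrative dimensions: a depthwise filter has shape [h, w, in_channels, channel_multiplier], so the output channel count is filter_shape[2] * filter_shape[3], matching out_channel_size above.

import tensorflow as tf

# Depthwise conv2d, bias add, then relu, as in DepthwiseConvModel.
x = tf.random.uniform([1, 8, 8, 3])          # NHWC input
filters = tf.random.uniform([2, 2, 3, 2])    # [h, w, in_ch, multiplier]
bias = tf.zeros([3 * 2])                     # in_ch * multiplier outputs
y = tf.nn.depthwise_conv2d(x, filters, strides=[1, 1, 1, 1], padding='SAME')
y = tf.nn.relu(tf.nn.bias_add(y, bias))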
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
%1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32> return %1 : tensor<1x3x2x2xf32> }
Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
  return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
      .Case("NONE", tflite::ActivationFunctionType_NONE)
      .Case("RELU", tflite::ActivationFunctionType_RELU)
      .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
      .Case("RELU6", tflite::ActivationFunctionType_RELU6)
      .Case("TANH", tflite::ActivationFunctionType_TANH)
      .Case("SIGN_BIT", tflite::ActivationFunctionType_SIGN_BIT);
Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes
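For reference, the TFLite flatbuffer schema assigns these activation types consecutive integer values. A Python mirror of the StringSwitch mapping (the enum values below follow the schema ordering; verify against the schema before relying on them):

import enum

# Illustrative mirror of tflite::ActivationFunctionType.
class ActivationFunctionType(enum.IntEnum):
    NONE = 0
    RELU = 1
    RELU_N1_TO_1 = 2
    RELU6 = 3
    TANH = 4
    SIGN_BIT = 5

def convert_activation(s: str) -> ActivationFunctionType:
    # Equivalent of the llvm::StringSwitch chain above; an unknown
    # name raises KeyError here rather than falling to a default.
    return ActivationFunctionType[s]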
tensorflow/compiler/mlir/tfr/python/tfr_gen_test.py
  y = _tfr_quant_raw_data(x)
  s, z = _tfr_quant_qparam(x)
  s = _tfr_quant_scale_factor(1.0, [s, s])
  s = _tfr_quant_scale_factor(1.0, [s])
  y = math_ops.Sub(y, z)
  qmin, qmax = _tfr_quant_act_range('RELU', 1.0, 0)
  (qmin, qmax)  # pylint: disable=pointless-statement
  d = _tfr_quant_rescale(y, s, 0)
  e = math_ops.Cast(x=d, DstT=dtypes.int16)
  f = math_ops.Cast(x=e, DstT=dtypes.int8)
  return f
Last Modified: Wed Oct 13 16:33:28 UTC 2021 - 28.8K bytes
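As used above, _tfr_quant_act_range('RELU', scale, zero_point) returns the (qmin, qmax) pair that the activation constrains quantized values to. A hypothetical int8 sketch of such a helper, assuming the usual affine quantization q = round(x / scale) + zero_point (illustration only, not the TFR implementation):

def quant_act_range(act, scale, zero_point, qtype_min=-128, qtype_max=127):
    # RELU clips the representable range at the quantized zero;
    # RELU6 additionally caps it at the quantized value of 6.0.
    if act == 'RELU':
        return max(zero_point, qtype_min), qtype_max
    if act == 'RELU6':
        qmax6 = int(round(zero_point + 6.0 / scale))
        return max(zero_point, qtype_min), min(qmax6, qtype_max)
    return qtype_min, qtype_max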
tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir
%5 = "quantfork.stats"(%4) {layerStats = dense<[-56.2916565, 122.922478]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32> %6 = "tfl.svdf"(%0, %1, %2, %3, %5) {fused_activation_function = "RELU", rank = 1 : i32} : (tensor<1x3xf32>, tensor<2x3xf32>, tensor<2x1xf32>, tensor<2xf32>, tensor<1x4xf32>) -> tensor<1x2xf32>
Last Modified: Thu May 02 09:41:17 UTC 2024 - 52.6K bytes