Results 21 - 30 of 31 for RELU
tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir
%2 = "tfl.pseudo_const"() {value = dense<0.000000e+00> : tensor<3xf32>} : () -> tensor<3xf32> %3 = "tfl.conv_2d"(%0, %1, %2) { dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : ( tensor<?x5x5x2xf32>, tensor<3x5x5x2xf32>, tensor<3xf32>) -> tensor<?x1x1x3xf32> %4 = "quantfork.stats"(%3) {
Last Modified: Thu May 02 09:41:17 UTC 2024 - 15.1K bytes
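In the tfl.conv_2d snippet above, fused_activation_function = "RELU" means the activation is folded into the convolution: convolve, add the per-channel bias, then clamp below at zero. A minimal NumPy sketch of that semantics for the shapes in the snippet (an illustration written for this listing, not converter or kernel code):

```python
import numpy as np

def conv2d_bias_relu(x, filters, bias):
    """VALID-padding, stride-1 conv + bias + fused ReLU.

    Shapes follow the snippet: x is NHWC (?x5x5x2), filters are
    OHWI (3x5x5x2), bias has one value per output channel.
    """
    n, h, w, _ = x.shape
    o, kh, kw, _ = filters.shape
    out = np.zeros((n, h - kh + 1, w - kw + 1, o), dtype=x.dtype)
    for i in range(out.shape[1]):
        for j in range(out.shape[2]):
            patch = x[:, i:i + kh, j:j + kw, :]  # (n, kh, kw, c)
            out[:, i, j, :] = np.tensordot(
                patch, filters, axes=([1, 2, 3], [1, 2, 3]))
    return np.maximum(out + bias, 0.0)  # bias add, then the fused RELU

x = np.random.randn(1, 5, 5, 2).astype(np.float32)
filters = np.random.randn(3, 5, 5, 2).astype(np.float32)
bias = np.zeros(3, dtype=np.float32)
print(conv2d_bias_relu(x, filters, bias).shape)  # (1, 1, 1, 3)
```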
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-prefer-tf2xla.mlir
data_format = "NHWC", dilations = [1, 1, 1, 1], epsilon = 9.99999974E-5 : f32, explicit_paddings = [], filter_format = "HWIO", fused_ops = ["BiasAdd", "Relu"], leakyrelu_alpha = 2.000000e-01 : f32, num_args = 2 : i64, operandSegmentSizes = array<i32: 1, 1, 2, 2>, padding = "SAME", strides = [1, 1, 1, 1], use_cudnn_on_gpu = true
Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 15.8K bytes
tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir
// RUN: stablehlo-quant-opt %s -stablehlo-merge-fusion-with-dequantize -split-input-file -verify-diagnostics | FileCheck %s

// Merge fusion with dequantize for relu case.
module attributes {tf_saved_model.semantics} {
  // CHECK-LABEL: func.func private @merge_relu_fusion
  func.func private @merge_relu_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
Last Modified: Thu Apr 04 23:45:53 UTC 2024 - 14K bytes
tensorflow/compiler/mlir/lite/schema/schema.fbs
HASHTABLE_LOOKUP = 10,
L2_NORMALIZATION = 11,
L2_POOL_2D = 12,
LOCAL_RESPONSE_NORMALIZATION = 13,
LOGISTIC = 14,
LSH_PROJECTION = 15,
LSTM = 16,
MAX_POOL_2D = 17,
MUL = 18,
RELU = 19,
// NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
// since different model developers use RELU1 in different ways. Never
// create another op called RELU1.
RELU_N1_TO_1 = 20,
Last Modified: Fri May 03 18:01:23 UTC 2024 - 41.7K bytes
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py
    padding: str = 'SAME',
    has_func_alias: bool = False,
) -> module.Module:
  class ConvModel(module.Module):
    """A simple model with a single conv2d, bias and relu."""

    def __init__(self):
      self.out_channel_size = filter_shape[-1]

      # This ensures filters will have different value range per out channel
      self.filters = np.stack(
          [
Last Modified: Tue May 14 06:31:57 UTC 2024 - 18.2K bytes
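The comment in the snippet above, about filters having a different value range per out channel, is what makes the test meaningful for per-channel quantization: each output channel then needs its own scale. A hedged sketch of one way to build such filters (make_filters is a hypothetical helper, not the test's actual code):

```python
import numpy as np

def make_filters(filter_shape):
    """Stack one HWI slice per output channel, each drawn from a wider
    uniform range than the last, so per-channel scales must differ."""
    out_channel_size = filter_shape[-1]
    return np.stack(
        [
            np.random.uniform(low=-(i + 1), high=i + 1, size=filter_shape[:-1])
            for i in range(out_channel_size)
        ],
        axis=-1,
    ).astype(np.float32)

filters = make_filters((2, 3, 3, 4))  # HWIO: last axis is out channels
print([float(np.ptp(filters[..., i])) for i in range(4)])  # ranges grow per channel
```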
tensorflow/compiler/mlir/tfr/ir/tfr_ops.td
    range for the fused activation `act` with the quantization defined by the
    `scale` and `zero point`. Currently, the allowed activations are `NONE`,
    `RELU`, `RELU6` and `RELU_N1_TO_1`.

    Example:

    ```mlir
    %3, %4 = tfr.quant_act_range(%2, %1, %0) :
        (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
    ```
  }];
Last Modified: Mon Apr 22 10:54:29 UTC 2024 - 17.4K bytes
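For intuition on what such a range computation involves: with r = scale * (q - zero_point), a real-valued activation bound r_b maps to the quantized bound q = zero_point + r_b / scale, clamped to the storage type's range. The sketch below works through that arithmetic for the four allowed activations; the int8 defaults and the rounding are assumptions for illustration, not the op's specification:

```python
def quant_act_range(act, scale, zero_point, qmin=-128, qmax=127):
    """Quantized clamp range implied by a fused activation (sketch)."""
    bounds = {
        'NONE': (qmin, qmax),
        'RELU': (zero_point, qmax),                        # r >= 0
        'RELU6': (zero_point,                              # 0 <= r <= 6
                  zero_point + round(6.0 / scale)),
        'RELU_N1_TO_1': (zero_point - round(1.0 / scale),  # -1 <= r <= 1
                         zero_point + round(1.0 / scale)),
    }
    lo, hi = bounds[act]
    return max(lo, qmin), min(hi, qmax)

print(quant_act_range('RELU6', scale=6.0 / 255, zero_point=-128))  # (-128, 127)
```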
tensorflow/cc/gradients/nn_grad.cc
                        std::vector<Output>* grad_outputs) {
  auto dx = internal::ReluGrad(scope, grad_inputs[0], op.input(0));
  grad_outputs->push_back(dx);
  return scope.status();
}
REGISTER_GRADIENT_OP("Relu", ReluGradHelper);

Status Relu6GradHelper(const Scope& scope, const Operation& op,
                       const std::vector<Output>& grad_inputs,
                       std::vector<Output>* grad_outputs) {
Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes
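internal::ReluGrad in the snippet above implements the usual rule: the incoming gradient dy passes through wherever the forward input was positive and is zeroed elsewhere; the Relu6 variant also zeroes it where the input exceeded 6, since the output is clamped there. A NumPy sketch of that math (not the C++ kernels):

```python
import numpy as np

def relu_grad(dy, x):
    # Gradient is 1 where the forward input was positive, else 0.
    return dy * (x > 0)

def relu6_grad(dy, x):
    # Relu6 clamps to [0, 6]; gradient is 1 only strictly inside the band.
    return dy * ((x > 0) & (x < 6))

x = np.array([-1.0, 2.0, 7.0])
dy = np.ones_like(x)
print(relu_grad(dy, x))   # [0. 1. 1.]
print(relu6_grad(dy, x))  # [0. 1. 0.]
```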
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td
      (MHLO_ConstantOp:$one (GetScalarOfType<1> $features)),
      (BinBroadcastDimensions $one, $features))))>;

//===----------------------------------------------------------------------===//
// Relu op patterns.
//===----------------------------------------------------------------------===//

// TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
Last Modified: Mon May 06 18:46:23 UTC 2024 - 34.8K bytes
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir
%1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32> return %1 : tensor<1x3x2x2xf32> }
Last Modified: Mon Jan 08 01:16:10 UTC 2024 - 25.2K bytes
tensorflow/compiler/mlir/lite/flatbuffer_operator.cc
    llvm::StringRef str, flatbuffers::FlatBufferBuilder* builder) {
  return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
      .Case("NONE", tflite::ActivationFunctionType_NONE)
      .Case("RELU", tflite::ActivationFunctionType_RELU)
      .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
      .Case("RELU6", tflite::ActivationFunctionType_RELU6)
      .Case("TANH", tflite::ActivationFunctionType_TANH)
Last Modified: Tue May 21 18:21:50 UTC 2024 - 38K bytes
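The StringSwitch above maps the MLIR fused_activation_function strings onto the FlatBuffer ActivationFunctionType enum from the TFLite schema. A small Python equivalent with the schema's numeric values (the fallback-to-NONE behavior here is an assumption for illustration):

```python
# Values from ActivationFunctionType in tensorflow/lite/schema/schema.fbs.
ACTIVATION_FUNCTION_TYPE = {
    'NONE': 0,
    'RELU': 1,
    'RELU_N1_TO_1': 2,
    'RELU6': 3,
    'TANH': 4,
    'SIGN_BIT': 5,
}

def to_activation_type(name):
    # Mirrors the StringSwitch above; unknown names fall back to NONE.
    return ACTIVATION_FUNCTION_TYPE.get(name, ACTIVATION_FUNCTION_TYPE['NONE'])

print(to_activation_type('RELU6'))  # 3
```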