Results 51 - 60 of 138 for relu6 (0.05 sec)

  1. tensorflow/compiler/mlir/lite/schema/schema.fbs

      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
      RELU_N1_TO_1 = 20,
      RELU6 = 21,
      RESHAPE = 22,
      RESIZE_BILINEAR = 23,
      RNN = 24,
      SOFTMAX = 25,
      SPACE_TO_DEPTH = 26,
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 41.7K bytes
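    For quick reference, the operator codes visible in this excerpt can be mirrored as a plain Python enum. The `BuiltinOperator` class below is purely illustrative; the authoritative definition is the FlatBuffers schema above.

    ```python
    from enum import IntEnum

    # Mirrors the BuiltinOperator values shown in the schema.fbs excerpt.
    class BuiltinOperator(IntEnum):
        LOGISTIC = 14
        LSH_PROJECTION = 15
        LSTM = 16
        MAX_POOL_2D = 17
        MUL = 18
        RELU = 19
        RELU_N1_TO_1 = 20  # formerly RELU1; renamed to avoid ambiguity
        RELU6 = 21
        RESHAPE = 22
        RESIZE_BILINEAR = 23
        RNN = 24
        SOFTMAX = 25
        SPACE_TO_DEPTH = 26

    assert BuiltinOperator.RELU6 == 21
    ```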
  2. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/merge-fusion-with-dequantize.mlir

        return %1 : tensor<1x3x!quant.uniform<i8:f32, 1.000000e-03:-3>>
      }
    }
    
    // -----
    
    // Merge fusion with dequantize for relu6 case.
    
    module attributes {tf_saved_model.semantics} {
      // CHECK-LABEL: func.func private @merge_relu6_fusion
      func.func private @merge_relu6_fusion(%arg0: tensor<1x4xf32>) -> tensor<1x3xf32> {
    - Last Modified: Thu Apr 04 23:45:53 UTC 2024
    - 14K bytes
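    The quantized type visible in the snippet, `!quant.uniform<i8:f32, 1.000000e-03:-3>`, encodes a scale of 1e-3 and a zero point of -3. Below is a rough numpy sketch of the dequantize arithmetic that the pass folds together with the relu6 fusion; it only illustrates the math, not the MLIR rewrite, and the `dequantize` helper name is hypothetical.

    ```python
    import numpy as np

    # Parameters read off the type in the excerpt:
    # !quant.uniform<i8:f32, 1.000000e-03:-3>  ->  scale = 1e-3, zero_point = -3
    scale, zero_point = 1e-3, -3

    def dequantize(q: np.ndarray) -> np.ndarray:
        # Standard affine dequantization: real_value = scale * (q - zero_point).
        return scale * (q.astype(np.float32) - zero_point)

    q = np.array([-3, 50, 127], dtype=np.int8)
    print(dequantize(q))  # [0.    0.053 0.13 ]
    ```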
  3. tensorflow/compiler/mlir/tfr/ir/tfr_ops.td

       range for the fused activation `act` with the quantization defined by the
       `scale` and `zero point`. Currently, the allowed activations are
       `NONE`, `RELU`, `RELU6` and `RELU_N1_TO_1`.
    
        Example:
    
        ```mlir
        %3, %4 = tfr.quant_act_range(%2, %1, %0) :
            (tfr.attr, float, i64) -> (tfr.tensor, tfr.tensor)
        ```
      }];
    
      let arguments = (ins
    - Last Modified: Mon Apr 22 10:54:29 UTC 2024
    - 17.4K bytes
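    As a concrete illustration of the range computation described above, here is a Python sketch of how a quantized clamp range for `RELU6` is typically derived from a scale and zero point under standard affine i8 quantization. The function name and the exact rounding/saturation behavior are assumptions; the actual TFR op computes this inside the compiler.

    ```python
    def quant_act_range_relu6(scale: float, zero_point: int,
                              qmin: int = -128, qmax: int = 127):
        """Quantized [low, high] clamp bounds for a fused RELU6 activation.

        Real-valued RELU6 keeps outputs in [0, 6]; under affine quantization
        q = round(x / scale) + zero_point, so the bounds map as below.
        (Sketch only, not the TFR implementation.)
        """
        low = max(qmin, round(0.0 / scale) + zero_point)
        high = min(qmax, round(6.0 / scale) + zero_point)
        return low, high

    print(quant_act_range_relu6(scale=0.1, zero_point=-10))  # (-10, 50)
    ```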
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir

        %1 = "tf.BiasAdd"(%0, %arg2) {data_format = "NHWC", device = ""} : (tensor<*xf32>, tensor<2xf32>) -> tensor<*xf32>
        %2 = "tf.Relu6"(%1) : (tensor<*xf32>) -> tensor<*xf32>
        func.return %2 : tensor<*xf32>
      }
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 80.5K bytes
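    The composite in this test is a bias-add followed by relu6. A minimal TensorFlow sketch of the same pattern, assuming a channels-last (NHWC) tensor; the `bias_add_relu6` wrapper is just an illustrative name, not part of the test.

    ```python
    import tensorflow as tf

    def bias_add_relu6(x: tf.Tensor, bias: tf.Tensor) -> tf.Tensor:
        # Same shape of pattern as the excerpt: BiasAdd (channels-last)
        # followed by Relu6, which clamps activations to [0, 6].
        y = tf.nn.bias_add(x, bias)
        return tf.nn.relu6(y)

    x = tf.random.normal([1, 2, 2, 2])
    bias = tf.constant([0.5, -0.5])
    print(bias_add_relu6(x, bias).shape)  # (1, 2, 2, 2)
    ```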
  5. tensorflow/cc/gradients/nn_grad.cc

      grad_outputs->push_back(dx);
      return scope.status();
    }
    REGISTER_GRADIENT_OP("Relu6", Relu6GradHelper);
    
    Status LeakyReluGradHelper(const Scope& scope, const Operation& op,
                               const std::vector<Output>& grad_inputs,
                               std::vector<Output>* grad_outputs) {
      float alpha;
      TF_RETURN_IF_ERROR(GetNodeAttr(op.node()->attrs(), "alpha", &alpha));
    - Last Modified: Fri May 27 23:34:33 UTC 2022
    - 24.5K bytes
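    The helper registered here wires up the backward pass for Relu6. As a rough illustration of the standard relu6 gradient rule (gradients pass through only where the forward input lies strictly between 0 and 6), here is a numpy sketch; `relu6_grad` is a hypothetical name and not the C++ helper above.

    ```python
    import numpy as np

    def relu6_grad(dy: np.ndarray, x: np.ndarray) -> np.ndarray:
        # Relu6 is flat outside (0, 6), so the incoming gradient dy is
        # passed through only where 0 < x < 6 and zeroed elsewhere.
        mask = (x > 0) & (x < 6)
        return dy * mask.astype(dy.dtype)

    x = np.array([-1.0, 2.0, 7.0], dtype=np.float32)
    dy = np.ones_like(x)
    print(relu6_grad(dy, x))  # [0. 1. 0.]
    ```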
  6. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

                   (BinBroadcastDimensions $one, $features))))>;
    
    //===----------------------------------------------------------------------===//
    // Relu op patterns.
    //===----------------------------------------------------------------------===//
    
    // TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
    // require HLO canonicalization of min and max on a tensor to ClampOp.
    
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
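    The TODO in this excerpt notes that lowering Relu6 amounts to recognizing a min/max pair as a single clamp. A small numpy illustration of that equivalence; the actual lowering rewrites HLO/MLIR ops rather than arrays.

    ```python
    import numpy as np

    x = np.linspace(-3.0, 9.0, 7, dtype=np.float32)

    # Relu6 written as the min/max pair the pattern would match ...
    via_min_max = np.minimum(np.maximum(x, 0.0), 6.0)
    # ... is the same function as a single clamp to [0, 6].
    via_clamp = np.clip(x, 0.0, 6.0)

    assert np.array_equal(via_min_max, via_clamp)
    ```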
  7. tensorflow/compiler/mlir/lite/tests/quantize.mlir

      func.return %1 : tensor<1x1x1x16xf32>
    
    - Last Modified: Tue May 28 23:10:13 UTC 2024
    - 39.7K bytes
  8. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/insert_calibration_statistics_saver.mlir

      %1 = "tf.BiasAdd"(%0, %arg2) <{data_format = "NHWC"}> : (tensor<1x2x2x2xf32>, tensor<2xf32>) -> tensor<1x2x2x2xf32>
      %2 = "tf.Relu6"(%1) {device = ""} : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
      return %2 : tensor<1x2x2x2xf32>
    }
    // CHECK-LABEL: @composite_conv2d_with_bias_and_relu6_fn_1
    // CHECK-NOT: "tf.CalibrationStatisticsSaver"
    
    // -----
    
    - Last Modified: Thu Apr 25 01:09:50 UTC 2024
    - 24.3K bytes
  9. tensorflow/compiler/mlir/lite/flatbuffer_operator.cc

      return llvm::StringSwitch<tflite::ActivationFunctionType>(str)
          .Case("NONE", tflite::ActivationFunctionType_NONE)
          .Case("RELU", tflite::ActivationFunctionType_RELU)
          .Case("RELU_N1_TO_1", tflite::ActivationFunctionType_RELU_N1_TO_1)
          .Case("RELU6", tflite::ActivationFunctionType_RELU6)
          .Case("TANH", tflite::ActivationFunctionType_TANH)
          .Case("SIGN_BIT", tflite::ActivationFunctionType_SIGN_BIT);
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 38K bytes
  10. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

        }]>
      ];
    }
    
    def TFL_Relu6Op: TFL_Op<"relu6", [
        PredOpTrait<"x and y must have same element type",
          TFL_TCresVTEtIsSameAsOp<0, 0>>,
        Pure,
        QuantizableResult,
        SameOperandsAndResultShape]> {
      let summary = "Relu6 operator";
    
      let description = [{
        Element-wise Relu6 operator
          x -> max(0, min(6, x))
      }];
    
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
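    The op description above defines relu6 element-wise as x -> max(0, min(6, x)). A short TensorFlow check that the built-in op matches that formula (illustration only):

    ```python
    import tensorflow as tf

    x = tf.constant([-2.0, 0.5, 3.0, 6.0, 9.0])

    # Per the TableGen description: relu6(x) = max(0, min(6, x)).
    from_formula = tf.maximum(0.0, tf.minimum(6.0, x))
    built_in = tf.nn.relu6(x)

    tf.debugging.assert_near(from_formula, built_in)
    print(built_in.numpy())  # [0.  0.5 3.  6.  6. ]
    ```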