Results 61 - 70 of 108 for Selu (0.05 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          # If present, the last op before return should be stablehlo.clamp for relu6
          # and stablehlo.maximum for relu.
          if activation_fn is nn_ops.relu6:
            self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
          elif activation_fn is nn_ops.relu:
            self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
        else:
          # Check that activation functions are implicit.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
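
    The test above asserts that relu6 lowers to a stablehlo.clamp and relu to a
    stablehlo.maximum immediately before the return. A minimal NumPy sketch of
    that correspondence (the function names here are illustrative, not taken
    from the TensorFlow sources):

        import numpy as np

        def relu(x):
            # relu(x) == max(x, 0): the pattern stablehlo.maximum expresses.
            return np.maximum(x, 0.0)

        def relu6(x):
            # relu6(x) == clamp(0, x, 6): the pattern stablehlo.clamp expresses.
            return np.clip(x, 0.0, 6.0)

        x = np.array([-3.0, 2.0, 7.5])
        print(relu(x))   # [0.  2.  7.5]
        print(relu6(x))  # [0. 2. 6.]
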
  2. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver_test.cc

          %0 = "tfl.conv_2d"(%arg0, %arg1, %arg2) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<1x4x4x3xf32>, tensor<3x1x1x3xf32>, tensor<3xf32>) -> tensor<1x4x4x3xf32>
          return %0 : tensor<1x4x4x3xf32>
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir

    // CHECK: func private @quantized_conv2d_with_relu6_fn
    // CHECK: func private @quantized_depthwise_conv2d_with_bias_and_relu_float_output_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
    // CHECK: func private @quantized_matmul_with_bias_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
    // CHECK: func private @quantized_matmul_fn
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 3.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/prepare-quantize-signed.mlir

        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
      } : (tensor<1x5x5x2xf32>, tensor<3x1x1x2xf32>, tensor<3xf32>) -> tensor<1x5x5x3xf32>
      %conv2 = "tfl.conv_2d"(%0, %w, %b2) {
        dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU",
        padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 18.4K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

        // Currently, GPU only supports Conv2D+BiasAdd+Relu fusion.
        if (IsGpuDevice(conv)) {
          auto activation = GetActivation(bias_add);
          if (!activation || activation->getName().stripDialect() != "Relu" ||
              !bias_add.getOutput().hasOneUse()) {
            (void)rewriter.notifyMatchFailure(conv, [&](Diagnostic &diag) {
              diag << "GPU only supports Conv2D+BiasAdd+Relu fusion";
            });
            return false;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
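
    The matcher above bails out on GPU unless the activation following the
    BiasAdd is a Relu with a single use. A rough TensorFlow sketch of the
    unfused Conv2D -> BiasAdd -> Relu chain this pass looks for (shapes and
    tensor values are illustrative assumptions):

        import tensorflow as tf

        x = tf.random.normal([1, 8, 8, 3])    # NHWC input
        w = tf.random.normal([3, 3, 3, 16])   # HWIO filter
        b = tf.zeros([16])

        # The unfused pattern: on GPU, only this Conv2D+BiasAdd+Relu chain
        # is eligible for fusion according to the check above.
        y = tf.nn.conv2d(x, w, strides=1, padding="SAME")
        y = tf.nn.bias_add(y, b)
        y = tf.nn.relu(y)
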
  6. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test.py

          ('none', None, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('relu', nn_ops.relu, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('relu6', nn_ops.relu6, False, False, quant_opts_pb2.TF, False, 'SAME'),
          ('with_bias', None, True, False, quant_opts_pb2.TF, False, 'SAME'),
          (
              'with_bias_and_relu',
              nn_ops.relu,
              True,
              False,
              quant_opts_pb2.TF,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 235.6K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir

      // tf.MyCustomOp is the result of conversion to a Custom op
      %2 = "tf.MyCustomOp"(%1, %0) {fused_activation_function = "RELU", int_attr = 2 : i32}  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("MyCustomOp")
      %3 = "tfl.exp"(%2)  : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      func.return %3 : tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 4.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/optimize_no_verify.mlir

      %cst = arith.constant dense<0.0> : tensor<2x3xbf16>
      %0 = "tfl.maximum"(%arg0, %cst) : (tensor<2x3xbf16>, tensor<2x3xbf16>) -> tensor<2x3xbf16>
      func.return %0 : tensor<2x3xbf16>
    
      // CHECK: %[[RESULT:.*]] = "tfl.relu"(%arg0)
      // CHECK: return %[[RESULT]]
    }
    
    // CHECK-LABEL: fuseScalarAddIntoConv2dBf16
    func.func @fuseScalarAddIntoConv2dBf16(%arg0: tensor<256x32x32x3xbf16>, %arg1: tensor<16x3x3x3xbf16>) -> tensor<256x8x7x16xbf16> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
    - Viewed (0)
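
    The CHECK lines above expect a tfl.maximum whose second operand is a splat
    zero constant to be rewritten into a single tfl.relu, since max(x, 0) is
    exactly relu(x). A toy Python sketch of that rewrite (purely illustrative;
    not how the MLIR pass is implemented):

        def fold_max_with_zero(op, lhs, rhs_const):
            # A maximum against an all-zero constant is equivalent to
            # relu on the other operand.
            if op == "tfl.maximum" and all(v == 0.0 for v in rhs_const):
                return ("tfl.relu", lhs)
            return (op, lhs, rhs_const)

        print(fold_max_with_zero("tfl.maximum", "%arg0", [0.0, 0.0, 0.0]))
        # -> ('tfl.relu', '%arg0')
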
  9. tensorflow/compiler/mlir/lite/ir/tfl_op_enums.td

    }
    
    // Allowed activation function cases
    // These should match the ActivationFunctionType enum in TFLite schema.
    def TFL_AFEnum_None  : I32EnumAttrCase<"NONE", 0>;
    def TFL_AFEnum_Relu  : I32EnumAttrCase<"RELU", 1>;
    def TFL_AFEnum_Relu1 : I32EnumAttrCase<"RELU_N1_TO_1", 2>;
    def TFL_AFEnum_Relu6 : I32EnumAttrCase<"RELU6", 3>;
    def TFL_AFEnum_Tanh  : I32EnumAttrCase<"TANH", 4>;
    def TFL_AFEnum_Sign  : I32EnumAttrCase<"SIGN_BIT", 5>;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Oct 20 00:05:24 UTC 2022
    - 6.4K bytes
    - Viewed (0)
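
    The enum cases above mirror TFLite's ActivationFunctionType values, which
    is what the fused_activation_function = "RELU" attributes in the other
    results refer to. A hedged sketch of the usual formulas behind each value
    (the name-to-value mapping is from the .td snippet; the formulas are the
    standard definitions, assumed here rather than taken from that file):

        import numpy as np

        FUSED_ACTIVATIONS = {
            0: ("NONE",         lambda x: x),
            1: ("RELU",         lambda x: np.maximum(x, 0.0)),
            2: ("RELU_N1_TO_1", lambda x: np.clip(x, -1.0, 1.0)),
            3: ("RELU6",        lambda x: np.clip(x, 0.0, 6.0)),
            4: ("TANH",         np.tanh),
            5: ("SIGN_BIT",     np.signbit),  # assumption: sign-bit extraction
        }

        x = np.array([-2.0, 0.5, 8.0])
        for code, (name, fn) in FUSED_ACTIVATIONS.items():
            print(code, name, fn(x))
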
  10. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
      LOCAL_RESPONSE_NORMALIZATION = 13,
      LOGISTIC = 14,
      LSH_PROJECTION = 15,
      LSTM = 16,
      MAX_POOL_2D = 17,
      MUL = 18,
      RELU = 19,
      // NOTE(aselle): RELU_N1_TO_1 used to be called RELU1, but it was renamed
      // since different model developers use RELU1 in different ways. Never
      // create another op called RELU1.
      RELU_N1_TO_1 = 20,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
    - Viewed (0)