Results 71 - 80 of 108 for Selu (0.15 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      return %2 : tensor<*xf32>
    }
    
    // CHECK-LABEL: gelu_aten
    // CHECK: %0 = "tfl.gelu"(%arg0) <{approximate = false}> : (tensor<5x10xf32>) -> tensor<5x10xf32>
    
    func.func private @gelu_decomp_2(%arg0: tensor<5x10xf32>) -> tensor<5x10xf32>
    func.func @gelu_aten_approximate(%arg0: tensor<5x10xf32>) -> (tensor<*xf32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
    - Viewed (0)
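The approximate = false attribute in the CHECK line above selects the exact erf-based GELU rather than the tanh approximation. A minimal Python sketch of the two formulas (function names here are illustrative, not TFLite API):

    import math

    def gelu_exact(x: float) -> float:
        # Exact GELU (approximate = false): x * Phi(x),
        # where Phi is the standard normal CDF.
        return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

    def gelu_tanh(x: float) -> float:
        # Tanh approximation (approximate = true).
        return 0.5 * x * (1.0 + math.tanh(
            math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

    print(gelu_exact(1.0))  # ~0.8413
    print(gelu_tanh(1.0))   # ~0.8412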
  2. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc

    // tfl.Abs / tfl.Average_pool_2d / tfl.Cos / tfl.div / tfl.exp / tfl.hardswish /
    // tfl.log / tfl.logistic / tfl.max_pool_2d / tfl.mirror_pad / tfl.maximum /
    // tfl.custom / tfl.mean / tfl.minimum / tfl.pad / tfl.pow / tfl.prelu /
    // tfl.relu / tfl.relu6 / tfl.rsqrt / tfl.sin / tfl.slice / tfl.softmax /
    // tfl.space_to_depth / tfl.sqrt / tfl.square / tfl.squared_difference /
    // tfl.strided_slice / tfl.tanh / tfl.transpose / tfl.transpose_conv
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 7.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

        %14 = "tfl.relu"(%10#1) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %15 = "tfl.logistic"(%10#0) : (tensor<4x2xf32>) -> tensor<4x2xf32>
        %16 = tfl.mul %15, %14 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %17 = tfl.add %13, %16 {fused_activation_function = "NONE"} : tensor<4x2xf32>
        %18 = "tfl.relu"(%17) : (tensor<4x2xf32>) -> tensor<4x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

                   (MHLO_ConstantOp:$one (GetScalarOfType<1> $features)),
                   (BinBroadcastDimensions $one, $features))))>;
    
    //===----------------------------------------------------------------------===//
    // Relu op patterns.
    //===----------------------------------------------------------------------===//
    
    // TODO(hinsu): Move these patterns to TF-to-TF lowering. Relu6 lowering will
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/merge_fusion_with_dequantize.cc

        func_op.eraseResult(0);
        func_op.insertResult(0, new_call_op.getResult(0).getType(),
                             /*resultAttrs=*/nullptr);
    
        // Modify the quantized fused function to do dequantize+relu(6).
        rewriter.setInsertionPoint(req_op);
        Value new_result = rewriter.create<mlir::stablehlo::UniformDequantizeOp>(
            req_op.getLoc(), func_op.getResultTypes()[0], req_op.getOperand());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.9K bytes
    - Viewed (0)
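The pass above rewrites the fused function so the result is dequantized before the relu(6) clamp is applied. A hedged scalar sketch of that dequantize+relu step in Python (the scale and zero point are made-up values, not taken from this pass):

    def uniform_dequantize(q: int, scale: float, zero_point: int) -> float:
        # Affine dequantization: real = scale * (q - zero_point).
        return scale * (q - zero_point)

    def dequantize_then_relu(q: int, scale: float, zero_point: int) -> float:
        # Dequantize first, then apply the relu clamp in float.
        return max(uniform_dequantize(q, scale, zero_point), 0.0)

    # Illustrative values only.
    print(dequantize_then_relu(-25, 0.05, -19))  # 0.0 (negative real value clipped)
    print(dequantize_then_relu(21, 0.05, -19))   # 2.0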
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla.mlir

      %3 = "tf.BiasAdd"(%2, %cst_0) {data_format = "NHWC", device = ""} : (tensor<1x3x2x2xf32>, tensor<2xf32>) -> tensor<1x3x2x2xf32>
      %4 = "tf.Relu"(%3) {device = ""} : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2xf32>
      %5 = "quantfork.qcast"(%4) : (tensor<1x3x2x2xf32>) -> tensor<1x3x2x2x!quant.uniform<i8:f32, 0.0027450981093387976:-19>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.3K bytes
    - Viewed (0)
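For reference, the quant.uniform&lt;i8:f32, 0.0027450981093387976:-19&gt; type in the qcast above encodes an affine int8 quantization. A minimal Python sketch of the forward quantization it implies (the function name is illustrative):

    def quantize_i8(x: float, scale: float = 0.0027450981093387976,
                    zero_point: int = -19) -> int:
        # q = clamp(round(x / scale) + zero_point, -128, 127)
        q = round(x / scale) + zero_point
        return max(-128, min(127, q))

    print(quantize_i8(0.0))  # -19 (the zero point)
    print(quantize_i8(0.4))  # 127, the top of the int8 range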
  7. tensorflow/compiler/mlir/lite/tests/ops.mlir

      // CHECK: "tfl.gelu"(%arg0)
      %0 = "tfl.gelu"(%arg0) {approximate = false}: (tensor<1x2x3x4x5xf32>) -> tensor<1x2x3x4x5xf32>
      func.return %0 : tensor<1x2x3x4x5xf32>
    }
    
    // -----
    
    // test invalid GELU input
    func.func @testGeluWithWrongInputType(%arg0: tensor<?xf32>) -> tensor<?xi32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 189.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir

          %outputs_12, %control_13 = tf_executor.island wraps "tf.Relu"(%outputs_10) {device = ""} : (tensor<16x112x112x?xf32>) -> tensor<16x112x112x?xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 7.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

      let hasFolder = 1;
    }
    
    def TFL_ReluOp: TFL_Op<"relu", [
        PredOpTrait<"x and y must have same element type",
          TFL_TCresVTEtIsSameAsOp<0, 0>>,
        Pure,
        QuantizableResult,
        SameOperandsAndResultShape]> {
      let summary = "Relu operator";
    
      let description = [{
        Element-wise Relu operator
          x -> max(0, x)
      }];
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
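A minimal Python sketch of the x -> max(0, x) semantics above and the shape-preserving behavior implied by the SameOperandsAndResultShape trait (illustrative code, not the TFLite kernel):

    def relu_elementwise(xs: list[float]) -> list[float]:
        # Element-wise Relu: x -> max(0, x); the output has the
        # same shape as the input.
        return [max(0.0, x) for x in xs]

    print(relu_elementwise([-1.5, 0.0, 2.0]))  # [0.0, 0.0, 2.0]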
  10. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

      %broad2_s = "quantfork.stats"(%broad2) {layerStats = dense<[0.000000e+00, 1.000000e+01]> : tensor<2xf32>} : (tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
      %add = "tfl.add"(%broad1_s, %broad2_s) {fused_activation_function = "RELU"} : (tensor<?x26x26x26x16xf32>, tensor<?x26x26x26x16xf32>) -> tensor<?x26x26x26x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
    - Viewed (0)
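fused_activation_function = "RELU" in the tfl.add above means the clamp runs as part of the add kernel rather than as a separate op. A hedged elementwise sketch in Python (illustrative only):

    def add_fused_relu(a: list[float], b: list[float]) -> list[float]:
        # tfl.add with fused_activation_function = "RELU":
        # elementwise add, then clamp at zero, in one pass.
        return [max(x + y, 0.0) for x, y in zip(a, b)]

    print(add_fused_relu([1.0, -3.0], [0.5, 1.0]))  # [1.5, 0.0]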