Results 21 - 30 of 60 for conv_2d (0.31 sec)

  1. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

    // object containing configuration parameters, builtins have a predetermined
    // set of acceptable options.
    // LINT.IfChange
    enum BuiltinOperator : int32 {
      ADD = 0,
      AVERAGE_POOL_2D = 1,
      CONCATENATION = 2,
      CONV_2D = 3,
      DEPTHWISE_CONV_2D = 4,
      DEPTH_TO_SPACE = 5,
      DEQUANTIZE = 6,
      EMBEDDING_LOOKUP = 7,
      FLOOR = 8,
      FULLY_CONNECTED = 9,
      HASHTABLE_LOOKUP = 10,
      L2_NORMALIZATION = 11,
      L2_POOL_2D = 12,
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
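
The excerpt above lists the stable integer codes that the TFLite schema assigns to builtin operators, with CONV_2D at code 3. As a minimal C++ sketch of how such codes can be consumed, the snippet below mirrors the first few values by hand; the enum and OperatorName helper are illustrative stand-ins, not the FlatBuffers-generated API.

    #include <cstdint>
    #include <iostream>

    // Illustrative mirror of the first few BuiltinOperator codes from
    // schema_v3b.fbs; the real project uses the FlatBuffers-generated enum,
    // not a hand-written copy like this one.
    enum class BuiltinOperator : int32_t {
      ADD = 0,
      AVERAGE_POOL_2D = 1,
      CONCATENATION = 2,
      CONV_2D = 3,
      DEPTHWISE_CONV_2D = 4,
    };

    const char* OperatorName(BuiltinOperator op) {
      switch (op) {
        case BuiltinOperator::ADD: return "ADD";
        case BuiltinOperator::AVERAGE_POOL_2D: return "AVERAGE_POOL_2D";
        case BuiltinOperator::CONCATENATION: return "CONCATENATION";
        case BuiltinOperator::CONV_2D: return "CONV_2D";
        case BuiltinOperator::DEPTHWISE_CONV_2D: return "DEPTHWISE_CONV_2D";
      }
      return "UNKNOWN";
    }

    int main() {
      // CONV_2D carries builtin code 3 in the schema.
      std::cout << OperatorName(BuiltinOperator::CONV_2D) << " = "
                << static_cast<int32_t>(BuiltinOperator::CONV_2D) << "\n";
    }
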
  2. tensorflow/compiler/mlir/lite/schema/schema.fbs

      fused_activation_function:ActivationFunctionType;
      dilation_w_factor:int = 1;
      dilation_h_factor:int = 1;
      // Parameters for Conv2D version 8 or above.
      // When set, quantized_bias_type defines the dtype for both bias and accumulator.
      quantized_bias_type: TensorType;
    }
    
    // Options for both Conv3D and Conv3DTranspose.
    table Conv3DOptions {
      padding:Padding;
      stride_d:int;
      stride_w:int;
      stride_h:int;
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 41.7K bytes
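
This excerpt shows the tail of Conv2DOptions (dilation factors defaulting to 1, plus the version-8 quantized_bias_type field) and the start of Conv3DOptions, which the schema shares between Conv3D and Conv3DTranspose. The sketch below mirrors a subset of those fields as plain C++ structs to make the defaults concrete; the types and names here are illustrative stand-ins rather than the generated schema API, and fields not visible in the excerpt are omitted.

    #include <iostream>

    // Illustrative stand-ins for the schema's Padding and TensorType enums;
    // field names follow schema.fbs, but this is not the generated API.
    enum class Padding { SAME, VALID };
    enum class TensorType { UNSET, INT32, INT64 };

    // Rough analogue of Conv2DOptions: the dilation factors default to 1,
    // matching `dilation_w_factor:int = 1` in the schema excerpt.
    struct Conv2DOptions {
      Padding padding = Padding::SAME;
      int stride_w = 0;
      int stride_h = 0;
      int dilation_w_factor = 1;
      int dilation_h_factor = 1;
      // Conv2D version 8+: when set, defines the dtype of bias and accumulator.
      TensorType quantized_bias_type = TensorType::UNSET;
    };

    // Rough analogue of Conv3DOptions, shared by Conv3D and Conv3DTranspose,
    // which adds a depth stride alongside width and height.
    struct Conv3DOptions {
      Padding padding = Padding::SAME;
      int stride_d = 0;
      int stride_w = 0;
      int stride_h = 0;
    };

    int main() {
      Conv2DOptions opts;
      std::cout << "default dilation: " << opts.dilation_w_factor << "x"
                << opts.dilation_h_factor << "\n";
    }
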
  3. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      %0 = "tf.Conv2D"(%arg0, %arg1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
        padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true
      } : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
      %1 = "tf.Relu6"(%0) {device = ""} : (tensor<*xf32>) -> tensor<*xf32>
    
    
      %3 = "tf.Conv2D"(%arg0, %arg1) {
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
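
The test above feeds a 1x3x4x3 NHWC input and a 2x3x3x2 filter through tf.Conv2D with SAME padding and strides [1, 1, 2, 1]; the result type is left unranked (tensor<*xf32>), but with SAME padding each spatial output dimension is just ceil(input / stride). A small sketch of that arithmetic, assuming the standard SAME-padding rule:

    #include <iostream>

    // SAME padding: the output spatial size depends only on input size and stride.
    int SameOutputDim(int input, int stride) {
      return (input + stride - 1) / stride;  // ceil(input / stride)
    }

    int main() {
      // Shapes from the test: input 1x3x4x3 (NHWC), strides [1, 1, 2, 1],
      // filter 2x3x3x2 (HWIO), so 2 output channels.
      int out_h = SameOutputDim(/*input=*/3, /*stride=*/1);  // 3
      int out_w = SameOutputDim(/*input=*/4, /*stride=*/2);  // 2
      std::cout << "output shape: 1x" << out_h << "x" << out_w << "x2\n";
    }
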
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

      %cst = "tf.Const"() {value = dense<0.000000e+00> : tensor<2xf32>} : () -> tensor<2xf32>
      %cst_1 = "tf.Const"() {value = dense<3.000000e+00> : tensor<2x3x3x2xf32>} : () -> tensor<2x3x3x2xf32>
      %0 = "tf.Conv2D"(%arg0, %cst_1) {
        data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [],
        padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
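
Unlike the previous test, this one builds the Conv2D filter from a splat constant, dense<3.000000e+00> of shape 2x3x3x2, so every one of its 2*3*3*2 = 36 elements holds the same value. A tiny sketch of what such a splat materializes to:

    #include <iostream>
    #include <vector>

    int main() {
      // Splat constant from the test: dense<3.000000e+00> : tensor<2x3x3x2xf32>.
      const std::vector<int> shape = {2, 3, 3, 2};
      int count = 1;
      for (int d : shape) count *= d;           // 2*3*3*2 = 36 elements
      std::vector<float> filter(count, 3.0f);   // every element is 3.0
      std::cout << filter.size() << " elements, value " << filter.front() << "\n";
    }
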
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

        } else if (function_name.contains("conv2d")) {
          // For Conv2D, the channel dimension must be static to calculate the
          // feature group count.
          if (!HasStaticShapeAtDims(call_op->getOperand(0), /*dims=*/3)) {
            return absl::InternalError(
                "The channel dimension of Conv2D is required to be static.");
          }
        } else if (function_name.contains("conv3d")) {
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
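
The pass above rejects a Conv2D whose channel dimension (index 3 in NHWC layout) is dynamic, since the feature group count cannot be derived from an unknown channel count. The sketch below shows that kind of check over a plain shape vector, using -1 for a dynamic dimension; HasStaticDim is a stand-in for the pass's HasStaticShapeAtDims helper, not the real function.

    #include <iostream>
    #include <vector>

    // Stand-in shape check: dimension `dim` must be known (not -1/dynamic).
    bool HasStaticDim(const std::vector<long long>& shape, int dim) {
      return dim < static_cast<int>(shape.size()) && shape[dim] >= 0;
    }

    int main() {
      // NHWC input with a dynamic batch but a static channel dimension (index 3).
      std::vector<long long> nhwc = {-1, 3, 4, 3};
      if (!HasStaticDim(nhwc, /*dim=*/3)) {
        std::cerr << "The channel dimension of Conv2D is required to be static.\n";
        return 1;
      }
      std::cout << "channels = " << nhwc[3] << "\n";
    }
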
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_weights.mlir

    // CHECK: %[[CONV_1:.*]] = "tf.Conv2D"(%[[GATHER]], %[[DEQUANTIZED_1]]) <{data_format = "NHWC", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true}> {device = ""} : (tensor<1x3x4x3xf32>, tensor<2x3x3x1024xf32>) -> tensor<1x3x2x1024xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 42K bytes
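
The CHECK line above runs the convolution on weights that were dequantized first. For affine-quantized tensors, dequantization is simply scale * (quantized - zero_point) applied per element; the sketch below illustrates that mapping with made-up scale and zero-point values, since the test's actual quantization parameters are not shown in the excerpt.

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Affine dequantization: real_value = scale * (quantized - zero_point).
    std::vector<float> Dequantize(const std::vector<int8_t>& q, float scale,
                                  int zero_point) {
      std::vector<float> out;
      out.reserve(q.size());
      for (int8_t v : q) out.push_back(scale * (v - zero_point));
      return out;
    }

    int main() {
      // Illustrative values only; the real scales live in the quantized model.
      std::vector<int8_t> weights = {-128, 0, 127};
      for (float f : Dequantize(weights, /*scale=*/0.02f, /*zero_point=*/0))
        std::cout << f << " ";
      std::cout << "\n";
    }
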
  7. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_lifting.mlir

    // CHECK: %[[CONV2D:.*]] = "tf.Conv2D"(%arg0, %[[CONST]]) <{data_format = "NHWC", dilations = [1, 1, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true}> : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<1x3x2x2xf32>
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 33.3K bytes
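
This pattern differs from the earlier Conv2D checks in its dilations attribute, [1, 1, 2, 1]. Dilation widens the kernel's effective extent to (kernel - 1) * dilation + 1 without changing the SAME-padding output size, which stays at ceil(input / stride); a short sketch:

    #include <iostream>

    // Effective kernel extent under dilation: (kernel - 1) * dilation + 1.
    int EffectiveKernel(int kernel, int dilation) {
      return (kernel - 1) * dilation + 1;
    }

    int main() {
      // From the CHECK line: kernel width 3, width dilation 2 -> effective width 5.
      // SAME padding keeps the output width at ceil(4 / 2) = 2, hence 1x3x2x2.
      std::cout << "effective kernel width: " << EffectiveKernel(3, 2) << "\n";
    }
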
  8. tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc

        }
      }
    
      // Handle Conv2D input, stride and filter.
      HandleConv2DInput(conv2d, block_size);
      HandleConv2DStride(conv2d);
      HandleConv2DFilter(conv2d, block_size);
    
      // Book keeping new filter shape for backprop filter rewrite.
      // Filter shape is defined in HandleConv2DFilter, thus it is RankedTensorType.
      filter_shape =
          mlir::cast<RankedTensorType>(conv2d.getFilter().getType()).getShape();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 29.3K bytes
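
The pass above rewrites a Conv2D for TPU by applying space-to-depth to its input and adjusting the stride and filter to match. Space-to-depth with block size b folds each b x b spatial block into the channel dimension, so an [N, H, W, C] input becomes [N, H/b, W/b, C*b*b]. The sketch below covers only that shape bookkeeping, with an example shape chosen for illustration; the filter rewrite in HandleConv2DFilter is not reproduced here.

    #include <array>
    #include <iostream>

    // Space-to-depth shape transform: [N, H, W, C] -> [N, H/b, W/b, C*b*b].
    std::array<long long, 4> SpaceToDepthShape(std::array<long long, 4> nhwc,
                                               int block_size) {
      return {nhwc[0], nhwc[1] / block_size, nhwc[2] / block_size,
              nhwc[3] * block_size * block_size};
    }

    int main() {
      // Example only: a 224x224 RGB input with block size 2 becomes 112x112x12.
      auto out = SpaceToDepthShape({1, 224, 224, 3}, /*block_size=*/2);
      std::cout << out[0] << "x" << out[1] << "x" << out[2] << "x" << out[3]
                << "\n";
    }
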
  9. tensorflow/compiler/mlir/lite/tests/dilated-conv.mlir

      %1 = "tf.SpaceToBatchND"(%arg0, %cst, %cst_0) : (tensor<1x128x128x3xf32>, tensor<2xi32>, tensor<2x2xi32>) -> tensor<4x68x68x3xf32>
      %2 = "tf.Conv2D"(%0, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<4x68x68x3xf32>, tensor<5x5x3x8xf32>) -> tensor<4x64x64x8xf32>
      %3 = "tf.Conv2D"(%1, %arg1) {padding = "VALID", strides = [1, 1, 1, 1]} : (tensor<4x68x68x3xf32>, tensor<5x5x3x8xf32>) -> tensor<4x64x64x8xf32>
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 44.7K bytes
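
In this dilated-conv test, SpaceToBatchND turns the 1x128x128x3 input into 4x68x68x3: the batch is multiplied by the product of the block shape (2 * 2 = 4) and each padded spatial dimension is divided by its block size, so (128 + 8) / 2 = 68, implying 8 rows and columns of padding in total. The sketch below redoes that arithmetic; the total padding is inferred from the result shape because the %cst_0 paddings constant is not visible in the excerpt.

    #include <iostream>

    int main() {
      // SpaceToBatchND bookkeeping for the test above.
      const int batch = 1, height = 128, block = 2;
      const int pad_total = 8;  // inferred from the 4x68x68x3 result shape
      int out_batch = batch * block * block;           // 1 * 2 * 2 = 4
      int out_spatial = (height + pad_total) / block;  // (128 + 8) / 2 = 68
      std::cout << out_batch << "x" << out_spatial << "x" << out_spatial << "x3\n";
    }
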
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_xla.mlir

      }
      func.func private @composite_conv2d_with_bias_and_relu6_fn_1(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>, %arg2: tensor<2xf32>) -> tensor<*xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 25.2K bytes
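
The composite function named above fuses a convolution with a bias add and a ReLU6 activation, where ReLU6 clamps each value to the range [0, 6]. The sketch below shows only the bias-plus-activation tail of such a composite, with illustrative input values; the convolution itself is omitted.

    #include <algorithm>
    #include <iostream>
    #include <vector>

    // ReLU6 clamps each value to [0, 6].
    float Relu6(float x) { return std::min(std::max(x, 0.0f), 6.0f); }

    int main() {
      // Bias add followed by ReLU6, as in the conv2d_with_bias_and_relu6
      // composite above (the convolution output here is made up).
      std::vector<float> conv_out = {-1.5f, 2.0f, 9.0f};
      std::vector<float> bias = {0.5f, 0.5f, 0.5f};
      for (size_t i = 0; i < conv_out.size(); ++i)
        std::cout << Relu6(conv_out[i] + bias[i]) << " ";
      std::cout << "\n";  // prints: 0 2.5 6
    }
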