
Results 1 - 10 of 81 for conv3d (0.11 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir

      %0 = "tf.Conv3D"(%arg0, %cst) {
        data_format = "NDHWC", device = "", dilations = [1, 1, 1, 1, 1], padding = "SAME", strides = [1, 1, 2, 1, 1]
      } : (tensor<1x3x4x3x3xf32>, tensor<2x3x3x3x2xf32>) -> tensor<1x3x2x3x2xf32>
      %1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x3x2xf32>) -> tensor<1x3x2x3x2xf32>
    
      %2 = "tf.Conv3D"(%arg0, %cst) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.5K bytes
    - Viewed (0)
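    The tf.Conv3D op in this test uses an NDHWC input, a DHWIO filter, SAME padding, and strides [1, 1, 2, 1, 1]. A minimal sketch that reproduces the same output shape through the public TensorFlow API (the random values here are illustrative, not taken from the test):

      import tensorflow as tf

      x = tf.random.normal([1, 3, 4, 3, 3])   # NDHWC input, as in tensor<1x3x4x3x3xf32>
      w = tf.random.normal([2, 3, 3, 3, 2])   # DHWIO filter, as in tensor<2x3x3x3x2xf32>
      # SAME padding with stride 2 on the H axis: ceil(4 / 2) = 2
      y = tf.nn.conv3d(x, w, strides=[1, 1, 2, 1, 1], padding="SAME")
      print(y.shape)  # (1, 3, 2, 3, 2)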
  2. tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir

    // CHECK: %[[conv3d:.*]] = "tfl.conv_3d"(%arg0, %[[w]], %[[const]]) <{dilation_d_factor = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_d = 1 : i32, stride_h = 1 : i32, stride_w = 1 : i32}> : (tensor<?x28x28x28x8xf32>, tensor<3x3x3x8x16xf32>, none) -> tensor<?x26x26x26x16xf32>
    // CHECK: %2 = "tfl.shape"(%[[conv3d]]) : (tensor<?x26x26x26x16xf32>) -> tensor<5xi64>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 38.2K bytes
    - Viewed (0)
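    The expected tfl.conv_3d result type in this CHECK line follows the usual VALID-padding arithmetic: (28 - 3) / 1 + 1 = 26 per spatial dimension, with 16 output channels. A shape-only sketch at the TensorFlow level (the quantization side of the test is not reproduced here):

      import tensorflow as tf

      x = tf.random.normal([2, 28, 28, 28, 8])   # a concrete batch stands in for the dynamic ? dim
      w = tf.random.normal([3, 3, 3, 8, 16])     # DHWIO filter
      y = tf.nn.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding="VALID")
      print(y.shape)               # (2, 26, 26, 26, 16)
      print(tf.shape(y).numpy())   # [ 2 26 26 26 16], analogous to the tfl.shape result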
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc

        } else if (function_name.contains("conv2d")) {
          // For Conv2D, the channel dimension must be static to calculate the
          // feature group count.
          if (!HasStaticShapeAtDims(call_op->getOperand(0), /*dims=*/3)) {
            return absl::InternalError(
                "The channel dimension of Conv2D is required to be static.");
          }
        } else if (function_name.contains("conv3d")) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 16.4K bytes
    - Viewed (0)
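    The feature group count mentioned in the comment is the ratio between the input channel count and the filter's input-channel dimension (1 for an ordinary convolution, greater than 1 for grouped convolutions), which is why the channel dimension has to be statically known. A rough sketch of that calculation with hypothetical shapes (the pass itself reads them from the MLIR operand types):

      # Hypothetical NHWC input and HWIO filter shapes.
      input_shape = [1, 32, 32, 8]    # channel dim (index 3) must be static
      filter_shape = [3, 3, 4, 16]
      feature_group_count = input_shape[3] // filter_shape[2]   # 8 // 4 = 2
      print(feature_group_count)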
  4. tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc

      if (!TFPaddingIsSameOrValid(op, &padding)) return failure();
    
      // TensorFlow Conv3D has no bias; optimization patterns that fuse Conv3D
      // with other ops can fill in the bias.
      Value none = rewriter.create<TFL::NoValueOp>(
          op->getLoc(), rewriter.getNoneType(), rewriter.getUnitAttr());
    
      rewriter.replaceOpWithNewOp<TFL::Conv3DOp>(
          op, tf_op.getType(), tf_op.getInput(), tf_op.getFilter(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 20 20:06:54 UTC 2024
    - 45.2K bytes
    - Viewed (0)
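    At the user level this legalization runs inside the regular TFLite converter: tf.Conv3D is rewritten to tfl.conv_3d with a none bias, and a following BiasAdd is typically fused into that slot later. A minimal, hedged conversion sketch (layer sizes are arbitrary):

      import tensorflow as tf

      model = tf.keras.Sequential([
          tf.keras.layers.Input(shape=(8, 8, 8, 3)),
          tf.keras.layers.Conv3D(filters=4, kernel_size=3, use_bias=True),
      ])
      converter = tf.lite.TFLiteConverter.from_keras_model(model)
      tflite_model = converter.convert()   # Conv3D + BiasAdd end up as a single tfl.conv_3d
      open("conv3d.tflite", "wb").write(tflite_model)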
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir

          equation = "",
          attr_map = "equation:0"
        } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32>
    
        func.return %4 : tensor<*xi32>
      }
    
      for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul", "Conv3D", "BatchMatMul", "Einsum"] {
        parameters[
          {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "i8"},
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jan 08 01:16:10 UTC 2024
    - 30.6K bytes
    - Viewed (0)
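    The for main_op in [...] / parameters [...] construct is a template: each ${main_op} occurrence is expanded once per listed op with the given parameter sets. Conceptually (a sketch only; the real expansion is handled by the pass infrastructure, not by Python):

      template = '"quantized_ops": ["${main_op}", "BiasAdd"]'
      for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul", "Conv3D", "BatchMatMul", "Einsum"]:
          print(template.replace("${main_op}", main_op))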
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_drq.mlir

    func.func @lift_float_conv3d(%arg0: tensor<1x3x4x3x3xf32>) -> (tensor<1x3x2x3x2xf32>) {
      %cst = "tf.Const"() {device = "", value = dense<1.0> : tensor<2x3x3x3x2xf32>} : () -> tensor<2x3x3x3x2xf32>
      %0 = "tf.Conv3D"(%arg0, %cst) {
        data_format = "NDHWC", device = "", dilations = [1, 1, 1, 1, 1], padding = "SAME", strides = [1, 1, 2, 1, 1]
      } : (tensor<1x3x4x3x3xf32>, tensor<2x3x3x3x2xf32>) -> tensor<1x3x2x3x2xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 11.8K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

      // Input: [N, H, W, C] for Conv2D or [N, D, H, W, C] for Conv3D.
      dnums.set_input_batch_dimension(0);
      dnums.set_input_feature_dimension(num_dims - 1);
      // Kernel: [K, K, I, O] for Conv2D or [K, K, K, I, O] for Conv3D.
      dnums.set_kernel_input_feature_dimension(num_dims - 2);
      dnums.set_kernel_output_feature_dimension(num_dims - 1);
      // Output: [N, H, W, C] for Conv2D or [N, D, H, W, C] for Conv3D.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 47.1K bytes
    - Viewed (0)
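    Those dimension-number comments correspond to TensorFlow's default Conv3D layouts: NDHWC activations (batch first, channels last) and a DHWIO kernel (input channels second to last, output channels last). A small illustration of the index positions being set (layout only, not the XLA call itself):

      # For Conv3D, num_dims = 5.
      num_dims = 5
      input_batch_dim = 0                        # N in [N, D, H, W, C]
      input_feature_dim = num_dims - 1           # C in [N, D, H, W, C]
      kernel_input_feature_dim = num_dims - 2    # I in [D, H, W, I, O]
      kernel_output_feature_dim = num_dims - 1   # O in [D, H, W, I, O]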
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td

       (IsInt8ElementType $filter),
       (IsConstTensor $filter),
       (IsInt32ElementType $conv),
       (HasStaticShapeConstraint $filter),
       (HasStaticShapeAtDimsConstraint<"3"> $input)],
      [], (addBenefit 10)>;
    
    // Convert Conv2D with hybrid inputs (f32 activation/int8 weight) to XlaConv
    def ConvertTFConv2DToXLAConvOpWeightOnly : Pat<
      (TF_Conv2DOp:$conv
        $input,
        (TF_MulOp (TF_CastOp (TF_IdentityOp $filter), $truncate1), $scale),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Dec 10 05:52:02 UTC 2023
    - 21.1K bytes
    - Viewed (0)
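    The weight-only pattern above matches a Conv2D whose filter is an int8 constant that gets cast back to float and rescaled before the convolution. A hedged sketch of that hybrid (f32 activation / int8 weight) form in eager TensorFlow, with made-up sizes and scale:

      import tensorflow as tf

      x = tf.random.normal([1, 8, 8, 3])                                  # f32 activation
      q_w = tf.cast(tf.random.uniform([3, 3, 3, 4], -128, 127, tf.int32),
                    tf.int8)                                              # int8 weight
      scale = tf.constant(0.02)
      w = tf.cast(q_w, tf.float32) * scale                                # Cast + Mul, as in the pattern
      y = tf.nn.conv2d(x, w, strides=1, padding="SAME")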
  9. tensorflow/compiler/mlir/lite/schema/schema_v3b.fbs

    table Conv2DOptions {
      padding:Padding;
      stride_w:int;
      stride_h:int;
      fused_activation_function:ActivationFunctionType;
      dilation_w_factor:int = 1;
      dilation_h_factor:int = 1;
    }
    
    // Options for both Conv3D and Conv3DTranspose.
    table Conv3DOptions {
      padding:Padding;
      stride_d:int;
      stride_w:int;
      stride_h:int;
      fused_activation_function:ActivationFunctionType;
      dilation_d_factor:int = 1;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 14:28:27 UTC 2024
    - 30K bytes
    - Viewed (0)
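    The Conv3DOptions table carries the same attributes that tfl.conv_3d uses (padding, per-axis strides and dilation factors, and a fused activation). As a rough sketch, a Keras Conv3D layer supplies those fields when converted; the exact field mapping noted below is an assumption, not taken from the schema:

      import tensorflow as tf

      layer = tf.keras.layers.Conv3D(
          filters=16,
          kernel_size=3,
          strides=(1, 2, 2),        # -> stride_d / stride_h / stride_w
          dilation_rate=(1, 1, 1),  # -> dilation_d_factor / dilation_h_factor / dilation_w_factor
          padding="valid",          # -> padding
          activation="relu",        # -> fused_activation_function
      )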
  10. tensorflow/compiler/mlir/lite/schema/schema.fbs

      fused_activation_function:ActivationFunctionType;
      dilation_w_factor:int = 1;
      dilation_h_factor:int = 1;
      // Parameters for Conv2D version 8 or above.
      // When set, quantized_bias_type defines the dtype for both bias and accumulator.
      quantized_bias_type: TensorType;
    }
    
    // Options for both Conv3D and Conv3DTranspose.
    table Conv3DOptions {
      padding:Padding;
      stride_d:int;
      stride_w:int;
      stride_h:int;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 41.7K bytes
    - Viewed (0)