Results 1 - 10 of 70 for conv_3d (0.53 sec)

  1. tensorflow/compiler/mlir/lite/tests/end2end/conv_2d.pbtxt

        }
      }
      attr {
        key: "_class"
        value {
          list {
            s: "loc:@conv_net_2d/conv_2d_0/w"
          }
        }
      }
    }
    node {
      name: "conv_net_2d_1/conv_2d_0/convolution"
      op: "Conv2D"
      input: "input"
      input: "conv_net_2d/conv_2d_0/w/read"
      attr {
        key: "T"
        value {
          type: DT_FLOAT
        }
      }
      attr {
        key: "data_format"
        value {
          s: "NHWC"
        }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jun 28 06:29:38 UTC 2019
    - 3.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/prepare-tf-with-allowing-bf16-and-f16-type-legalization.mlir

      %0 = "tf.Conv2D"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", data_format = "NHWC", dilations = [1, 2, 3, 1], padding = "SAME", strides = [1, 4, 5, 1]} : (tensor<256x32x32x3xbf16>, tensor<3x3x3x16xbf16>) -> tensor<256x8x7x16xbf16>
      func.return %0 : tensor<256x8x7x16xbf16>
      // CHECK: "tfl.conv_2d"
    }
    
    // CHECK-LABEL: fused_batch_norm_v3_bf16
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 26 23:53:32 UTC 2022
    - 2.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md

    fused with `automatic double transpose` to reduce extra overhead on the host.
    
    ### Extend from Conv2D to Conv3D
    
    SpaceToDepth not only helps with 2D image models but also 3D image models such
    as I3D. The plan is to apply automatic space to depth for Conv2D as the first
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Oct 24 02:51:43 UTC 2020
    - 8.3K bytes
    - Viewed (0)
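    The space_to_depth.md excerpt above describes TensorFlow's automatic space-to-depth transform for convolutional models. As a rough illustration only (not the MLIR pass itself, and with made-up shapes), tf.nn.space_to_depth trades spatial resolution for channels ahead of the first convolution:

        import tensorflow as tf

        # Space-to-depth moves 2x2 spatial blocks into the channel dimension,
        # so the first conv sees a smaller grid with more channels.
        block = 2
        images = tf.random.normal([8, 224, 224, 3])        # NHWC input batch
        transformed = tf.nn.space_to_depth(images, block)   # -> [8, 112, 112, 12]

        # Illustrative filter for the transformed layout; the real pass rewrites
        # the original conv filter rather than creating a new one.
        filters = tf.random.normal([3, 3, 3 * block * block, 64])
        y = tf.nn.conv2d(transformed, filters, strides=1, padding="SAME")
        print(y.shape)  # (8, 112, 112, 64)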
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir

    // CHECK: func private @quantized_matmul_with_relu_fn
    // CHECK: func private @quantized_matmul_with_relu6_fn
    // CHECK: func private @quantized_conv3d_with_bias_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["Conv3D", "BiasAdd"]
    // CHECK: func private @quantized_batch_matmul_with_bias_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["BatchMatMul", "BiasAdd"]
    // CHECK: func private @quantize_i8
    // CHECK: func private @dequantize_i8
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 3.3K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/optimize_functional_ops.mlir

        else_branch = @_functionalize_if_else_branch_00, is_stateless = false,
        then_branch = @_functionalize_if_then_branch_00} :
          (tensor<i1>, tensor<i1>, tensor<3x15x14x3xf32>, tensor<3x15x14x3xf32>) -> tensor<i1>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 30 10:34:48 UTC 2022
    - 8.4K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 03 15:43:38 UTC 2023
    - 7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_patterns.h

                                    PatternRewriter& rewriter) const override;
    };
    
    // Ensure bias for conv2d op.
    struct EnsureBiasForConv2d : public OpRewritePattern<TFL::Conv2DOp> {
      using OpRewritePattern<TFL::Conv2DOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(TFL::Conv2DOp conv_op,
                                    PatternRewriter& rewriter) const override;
    };
    
    // Pad slice to 4d.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 03 16:37:16 UTC 2022
    - 4.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td

             tensor<64x3x3x3xf32>
        %conv = "tfl.conv_2d"(%input_act, %w, %bias)
    
        but if it is supported, it will be rewritten as:
    
        %q_w = "tfl.pseudo_qconst"() {
             qtype = tensor<64x3x3x3x!quant.uniform<i8<-127:127>:f32, 1.000000e+00>>
        %conv = "tfl.conv_2d"(%input_act, %q_w, %bias)
    
        Note that this is part of reaching feature parity with the old quantizer for
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 8.3K bytes
    - Viewed (0)
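    The quantization.td excerpt above shows a float weight constant feeding tfl.conv_2d being rewritten as an i8 qconst with a narrow symmetric range (i8<-127:127>). Below is a minimal numpy sketch of that mapping, assuming a single per-tensor scale (the actual quantizer may use per-axis scales) and a hypothetical helper name:

        import numpy as np

        def quantize_weights_symmetric(w):
            # Symmetric i8 quantization into [-127, 127]; the scale is chosen so
            # the largest-magnitude weight maps to +/-127.
            max_abs = np.abs(w).max()
            scale = max_abs / 127.0 if max_abs > 0 else 1.0
            q = np.clip(np.round(w / scale), -127, 127).astype(np.int8)
            return q, scale

        w = np.random.randn(64, 3, 3, 3).astype(np.float32)   # OHWI conv_2d filter
        q_w, scale = quantize_weights_symmetric(w)
        w_dequant = q_w.astype(np.float32) * scale             # what the conv effectively sees
        print(q_w.dtype, scale, np.abs(w - w_dequant).max())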
  9. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/optional_input.json

    // This test verifies that if the flatbuffer omits the last optional input `bias` of the tfl.conv_2d op, the flatbuffer_importer automatically adds a `none` value to tfl.conv_2d.
    
    // CHECK: %[[CST:.*]] = "tfl.no_value"() <{value}> : () -> none
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 1.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc

          if (function_name.contains("with_bias")) {
            spec->biases_params[2] = {{0, 1},
                                      quant::GetUniformQuantizedTypeForBias};
          }
        } else if (function_name.contains("conv3d")) {
          spec->coeff_op_quant_dim[1] = 4;
          if (function_name.contains("with_bias")) {
            spec->biases_params[2] = {{0, 1},
                                      quant::GetUniformQuantizedTypeForBias};
          }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.3K bytes
    - Viewed (0)
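    In the tf_op_quant_spec.cc excerpt above, the conv3d branch sets the weight operand's quantization dimension to 4 because a TensorFlow Conv3D filter is laid out as [depth, height, width, in_channels, out_channels], so per-channel scales run along axis 4. A small numpy sketch of that idea (illustrative only, not the TF pass):

        import numpy as np

        # Conv3D filter layout: [depth, height, width, in_channels, out_channels].
        filters = np.random.randn(3, 3, 3, 8, 16).astype(np.float32)

        # One symmetric scale per output channel, i.e. along axis 4.
        scales = np.abs(filters).max(axis=(0, 1, 2, 3)) / 127.0
        q_filters = np.clip(np.round(filters / scales), -127, 127).astype(np.int8)
        print(scales.shape)  # (16,) -- one scale per output channel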