Results 1 - 10 of 171 for conv3d (0.13 sec)
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
parameters[
  {"quantized_ops": ["MatMul"], "internal_func_name": "internal_matmul_fn"},
  {"quantized_ops": ["Conv2D"], "internal_func_name": "internal_conv2d_fn"},
  {"quantized_ops": ["DepthwiseConv2D"], "internal_func_name": "internal_depthwise_conv2d_fn"},
  {"quantized_ops": ["Conv3D"], "internal_func_name": "internal_conv3d_fn"},
  {"quantized_ops": ["BatchMatMul"], "internal_func_name": "internal_batch_matmul_fn"}
]
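This parameterized library stamps out weight-only variants of MatMul, Conv2D, DepthwiseConv2D, Conv3D and BatchMatMul for the XLA opset. Conceptually, weight-only quantization stores the weight operand as int8 and dequantizes it back to float before the op itself runs in floating point. A minimal NumPy sketch of that idea for the MatMul case, using symmetric per-tensor scaling; the helper logic below is illustrative and not taken from the library:

import numpy as np

x = np.random.randn(4, 8).astype(np.float32)      # float activations
w = np.random.randn(8, 16).astype(np.float32)     # float weights

scale = np.abs(w).max() / 127.0                   # per-tensor scale
w_int8 = np.clip(np.round(w / scale), -127, 127).astype(np.int8)

# Weight-only execution: dequantize the stored int8 weight, compute in float.
y = x @ (w_int8.astype(np.float32) * scale)
print(np.max(np.abs(y - x @ w)))                  # small quantization error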
tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir
// CHECK: func private @quantized_matmul_with_relu_fn
// CHECK: func private @quantized_matmul_with_relu6_fn
// CHECK: func private @quantized_conv3d_with_bias_fn
// CHECK-SAME: tf_quant.quantized_ops = ["Conv3D", "BiasAdd"]
// CHECK: func private @quantized_batch_matmul_with_bias_fn
// CHECK-SAME: tf_quant.quantized_ops = ["BatchMatMul", "BiasAdd"]
// CHECK: func private @quantize_i8
// CHECK: func private @dequantize_i8
tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions.mlir
%0 = "tf.Conv3D"(%arg0, %cst) {data_format = "NDHWC", device = "", dilations = [1, 1, 1, 1, 1], padding = "SAME", strides = [1, 1, 2, 1, 1]} : (tensor<1x3x4x3x3xf32>, tensor<2x3x3x3x2xf32>) -> tensor<1x3x2x3x2xf32>
%1 = "tf.Relu"(%0) {device = ""} : (tensor<1x3x2x3x2xf32>) -> tensor<1x3x2x3x2xf32>
%2 = "tf.Conv3D"(%arg0, %cst) {
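The shapes in this test follow the usual SAME-padding rule, output spatial size = ceil(input / stride): with a 1x3x4x3x3 NDHWC input, a 2x3x3x3x2 filter and strides [1, 1, 2, 1, 1], that gives depth ceil(3/1) = 3, height ceil(4/2) = 2, width ceil(3/1) = 3 and 2 output channels, i.e. 1x3x2x3x2. A quick check with the public TensorFlow API, assuming TensorFlow is installed:

import tensorflow as tf

x = tf.random.normal([1, 3, 4, 3, 3])   # NDHWC input
w = tf.random.normal([2, 3, 3, 3, 2])   # [D, H, W, Cin, Cout] filter
y = tf.nn.conv3d(x, w, strides=[1, 1, 2, 1, 1], padding="SAME")
print(y.shape)  # (1, 3, 2, 3, 2), matching the lifted tf.Conv3D in the test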
tensorflow/compiler/mlir/lite/tests/prepare-quantize-dynamic-range.mlir
// CHECK: %[[conv3d:.*]] = "tfl.conv_3d"(%arg0, %[[w]], %[[const]]) <{dilation_d_factor = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_d = 1 : i32, stride_h = 1 : i32, stride_w = 1 : i32}> : (tensor<?x28x28x28x8xf32>, tensor<3x3x3x8x16xf32>, none) -> tensor<?x26x26x26x16xf32>
// CHECK: %2 = "tfl.shape"(%[[conv3d]]) : (tensor<?x26x26x26x16xf32>) -> tensor<5xi64>
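Here the expected shapes follow the VALID-padding rule, output = floor((input - kernel) / stride) + 1: a ?x28x28x28x8 input with a 3x3x3x8x16 filter and unit strides yields ?x26x26x26x16, since 28 - 3 + 1 = 26 in each spatial dimension and the filter has 16 output channels. A tiny helper restating that arithmetic (purely illustrative):

def valid_out_size(in_size, kernel, stride=1, dilation=1):
    # Output length of a VALID convolution along one dimension.
    eff_kernel = (kernel - 1) * dilation + 1
    return (in_size - eff_kernel) // stride + 1

print(valid_out_size(28, 3))  # 26, as in tensor<?x26x26x26x16xf32>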
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
} else if (function_name.contains("conv2d")) {
  // For Conv2D, the channel dimension must be static to calculate the
  // feature group count.
  if (!HasStaticShapeAtDims(call_op->getOperand(0), /*dims=*/3)) {
    return absl::InternalError(
        "The channel dimension of Conv2D is required to be static.");
  }
} else if (function_name.contains("conv3d")) {
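The check exists because, when lowering to an XLA convolution, the feature group count is computed as the operand's channel count divided by the filter's input-channel size, and that division can only happen at compile time if the channel dimension (dimension 3 for NHWC Conv2D) is static. An illustrative sketch of that computation; the helper below is hypothetical and not the pass's code:

def feature_group_count(input_channels, filter_in_channels):
    # XLA convolution: groups = operand channels / kernel input channels.
    if input_channels is None:            # dynamic dimension
        raise ValueError("channel dimension must be static")
    return input_channels // filter_in_channels

print(feature_group_count(8, 8))   # 1  -> regular convolution
print(feature_group_count(8, 1))   # 8  -> depthwise-style grouping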
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td
  [(HasRankOf<1> $add_rhs_value),
   (HasEqualElementSize<[-1], [0]> $conv_out, $add_rhs)], [], (addBenefit -1)>;

// Convert conv+sub+mul pattern to conv+mul+add.
// (conv - sub) * mul -> conv * mul + (-sub) * mul
//
// This is needed to support Conv+BatchNorm pattern from Jax models converted
// using jax2tf w/o native serialization. Note that Jax2tf patterns always
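The rewrite relies on plain distributivity: (conv - sub) * mul = conv * mul + (-sub) * mul, which turns the BatchNorm-style pattern into a multiply that can be folded into the convolution weights plus a bias-like add. A numeric sanity check with per-channel broadcasting; the shapes are illustrative only:

import numpy as np

conv_out = np.random.randn(1, 4, 4, 8).astype(np.float32)  # conv result, NHWC
sub = np.random.randn(8).astype(np.float32)                # per-channel offset
mul = np.random.randn(8).astype(np.float32)                # per-channel scale

lhs = (conv_out - sub) * mul             # conv + sub + mul pattern
rhs = conv_out * mul + (-sub) * mul      # conv + mul + add pattern
print(np.allclose(lhs, rhs, atol=1e-5))  # True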
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
  if (function_name.contains("with_bias")) {
    spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias};
  }
} else if (function_name.contains("conv3d")) {
  spec->coeff_op_quant_dim[1] = 4;
  if (function_name.contains("with_bias")) {
    spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias};
  }
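Setting coeff_op_quant_dim[1] = 4 reads as: the weight operand (operand 1) of Conv3D is quantized per-channel along dimension 4, which is the output-channel axis of a [D, H, W, Cin, Cout] filter. A NumPy sketch of symmetric per-channel quantization along that axis, assuming that interpretation; the code is illustrative, not the quantizer's implementation:

import numpy as np

w = np.random.randn(2, 3, 3, 3, 16).astype(np.float32)   # [D, H, W, Cin, Cout]

# One scale per output channel (axis 4), computed over all other axes.
scales = np.abs(w).max(axis=(0, 1, 2, 3)) / 127.0          # shape (16,)
q = np.clip(np.round(w / scales), -127, 127).astype(np.int8)
w_dequant = q.astype(np.float32) * scales

print(np.max(np.abs(w - w_dequant)))   # per-channel quantization error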
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
if (!TFPaddingIsSameOrValid(op, &padding)) return failure();

// TensorFlow Conv3D has no bias; optimization patterns that fuse Conv3D
// with other ops can fill in the bias.
Value none = rewriter.create<TFL::NoValueOp>(
    op->getLoc(), rewriter.getNoneType(), rewriter.getUnitAttr());
rewriter.replaceOpWithNewOp<TFL::Conv3DOp>(
    op, tf_op.getType(), tf_op.getInput(), tf_op.getFilter(),
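In the TF dialect a Conv3D carries no bias of its own: a bias, if present, appears as a separate BiasAdd that later fusion patterns can fold into the bias operand of tfl.conv_3d, which is why the legalization passes a `none` value here. That separation is visible from the public API; a small sketch assuming TensorFlow is installed:

import tensorflow as tf

x = tf.random.normal([1, 3, 4, 3, 3])
w = tf.random.normal([2, 3, 3, 3, 2])
b = tf.random.normal([2])

# tf.nn.conv3d has no bias argument; the bias is a separate BiasAdd op that
# converter fusion patterns may later fold into the TFLite conv_3d bias operand.
y = tf.nn.bias_add(tf.nn.conv3d(x, w, strides=[1, 1, 1, 1, 1], padding="SAME"), b)
print(y.shape)  # (1, 3, 4, 3, 2)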
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions_drq.cc
if ((quantization_method_ == tensorflow::quantization::QuantizationMethod::
         METHOD_DYNAMIC_RANGE_INT8) &&
    (function_name.contains("batch_matmul") ||
     function_name.contains("conv3d"))) {
  call_op->removeAttr(kQuantTraitAttrName);
}
// TODO(b/270906404): Support weight-only gather for uniform quantized opset
// in PTQ mode
if (target_opset_ == OpSet::UNIFORM_QUANTIZED &&
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library.mlir
    equation = "", attr_map = "equation:0"
  } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32>
  func.return %4 : tensor<*xi32>
}

for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul", "Conv3D", "BatchMatMul", "Einsum"] {
  parameters[
    {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "i8"},
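This `for main_op in [...]` template is what the insert_quantized_functions test above is checking: each (main op, fused ops) combination is expanded into a function such as quantized_conv3d_with_bias_fn or quantized_batch_matmul_with_bias_fn. A rough Python sketch of that name expansion, using the snake_case names listed in the weight-only library above; the exact expansion mechanics of the MLIR template are not shown here:

# Internal snake_case names as listed in quantized_function_library_xla_weight_only.mlir.
op_to_name = {
    "MatMul": "matmul",
    "Conv2D": "conv2d",
    "DepthwiseConv2D": "depthwise_conv2d",
    "Conv3D": "conv3d",
    "BatchMatMul": "batch_matmul",
}

for op, name in op_to_name.items():
    print(f"quantized_{name}_with_bias_fn")
# quantized_conv3d_with_bias_fn and quantized_batch_matmul_with_bias_fn match the
# function names checked in insert_quantized_functions.mlir above.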