- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 81 for conv3d (0.12 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
%5 = "tf.MatMul"(%1, %3) { attr_map = "transpose_a:0,transpose_b:1" } : (tensor<*xi32>, tensor<*xi32>) -> tensor<*xi32> func.return %5 : tensor<*xi32> } // Conv2D with int32 accumulation func.func private @internal_conv2d_fn( %input : tensor<*xi8>, %filter : tensor<*xi8>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_drq.mlir
%conv = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 2, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32> return %conv : tensor<*xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Jan 05 18:35:42 UTC 2024 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_patterns.td
(UpdateShapeWithAxis<-1> $qtype, $old_value))), [(CanUpdateShapeWithAxis<-1> $qtype, $old_value)]>; // The axis is set to 0 because the transpose is from the legalization of // tf.conv2d and the new channel axis is the first dimension. def ReorderTransposeDequantQuantUsedByConv : Pat<(TF_TransposeOp:$old_value (TFL_DequantizeOp (TFL_QuantizeOp $input, $qtype)), $perm),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 00:40:15 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions_weight_only.mlir
%conv = "tf.Conv2D"(%arg0, %arg1) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 2, 2, 1], explicit_paddings = [], padding = "SAME", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x2x2x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32> return %conv : tensor<*xf32> }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 11.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/quantize-numeric-verify.mlir
// DEBUG: %[[act:.*]] = "tfl.dequantize"(%arg0) : (tensor<1x224x224x3x!quant.uniform<u8:f32, 7.812500e-03:128>>) -> tensor<1x224x224x3xf32> // DEBUG: %[[f_conv:.*]] = "tfl.conv_2d"(%[[act]], %[[wt]], %[[bias]]) // DEBUG: %[[q_conv:.*]] = "tfl.conv_2d" // DEBUG: "tfl.NumericVerify"(%[[q_conv]], %[[f_conv]]) <{log_if_failed = true, tolerance = 5.000000e+00 : f32}>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 15.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/quantization.mlir
// CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: QUANTIZE // CHECK-NEXT: }, { // CHECK-NEXT: deprecated_builtin_code: 3, // CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: CONV_2D // CHECK-NEXT: }, { // CHECK-NEXT: deprecated_builtin_code: 22, // CHECK-NEXT: version: 1, // CHECK-NEXT: builtin_code: RESHAPE // CHECK-NEXT: }, { // CHECK-NEXT: deprecated_builtin_code: 25,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 11.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_xla.mlir
%conv = "tf.Conv2D"(%dq_input, %dq_weight) {attr_map = "0:strides,1:use_cudnn_on_gpu,2:padding,3:explicit_paddings,4:dilations", data_format = "NHWC", device = "", dilations = [1, 1, 1, 1], explicit_paddings = [], padding = "VALID", strides = [1, 1, 2, 1], use_cudnn_on_gpu = true} : (tensor<1x3x4x3xf32>, tensor<2x3x3x2xf32>) -> tensor<*xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 08 19:32:28 UTC 2024 - 11.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/README.md
In this pass, every op will be targeted with the user-specified targets based on the device capabilities. For example, if the user specified the desired targets are "GPU" and "CPU", and `conv2d` can run on both "GPU" and "CPU", we will annotate the op `conv2d` with "GPU" since it's preferred; `pack` can only run on "CPU", so we will annotate the op with "CPU" since "GPU" does not support this op. #### Raise Target Subgraphs Pass
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 11.6K bytes - Viewed (0) -
tensorflow/cc/gradients/nn_grad.cc
op.input(2), strides, padding, filter_attrs)); Conv2D::Attrs conv_attrs; conv_attrs.use_cudnn_on_gpu_ = use_cudnn_on_gpu; conv_attrs.explicit_paddings_ = explicit_paddings; conv_attrs.data_format_ = data_format; conv_attrs.dilations_ = dilations; grad_outputs->push_back( Conv2D(scope, grad_inputs[0], op.input(1), strides, padding, conv_attrs)); return scope.status(); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 27 23:34:33 UTC 2022 - 24.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tpu_space_to_depth_pass.mlir
%5 = "tf.Pad"(%arg0, %3) : (tensor<2x224x224x3xf32>, tensor<4x2xi32>) -> tensor<2x230x230x3xf32> // CHECK: "tf.Conv2D" // CHECK-SAME: strides = [1, 1, 1, 1] // CHECK-SAME: (tensor<2x115x115x12xf32>, tensor<4x4x12x64xf32>) -> tensor<2x112x112x64xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 37.4K bytes - Viewed (0)