- Sort Score
- Results per page 10 results
- Languages All
Results 111 - 120 of 185 for Axis (0.03 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc
// Enable per-channel quantization for convolution weights. QuantizedType conv_weight_quantized_type{}; // Assumes NHWC format, specifying the channel dimension (3) as the // quantized axis. conv_weight_quantized_type.mutable_dimension_specs()->set_dimension(3); // The index of weight operands passed to lifted functions for convolution // is 1. StaticRangePtq& static_range_ptq_spec =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 03:36:50 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/python/tfr_gen.py
return type_ def _pack_tensor_list(self, value): # This is packing a list of tensors, then the axis is 0. axis = self._ssa_name('zero') self._emit_with_loc('\n{} = arith.constant 0 : i64'.format(axis)) casted = self._ssa_name('pack') self.emit('\n{} = tfr.call @tf__pack({}, {})'.format(casted, value, axis)) self._emit_with_loc(' : (!tfr.tensor_list, i64) -> !tfr.tensor') # load the op def of tf.Pack
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 27 15:27:03 UTC 2022 - 55.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
assert(input_axis < input_indices->size()); input_indices->operator[](input_axis) = static_cast<uint64_t>(i); // Write the value from `input_tensor` if it is the last axis or // recurse into the next axis. const bool is_last_axis = output_axis == num_dimensions - 1; if (is_last_axis) { new_values->push_back( input_tensor.getValues<Attribute>()[*input_indices]);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/tests/target-annotation.mlir
// CHECK: tac.device = "GPU", tac.inference_type = "FLOAT" %2 = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32> // CHECK: tac.device = "CPU", tac.inference_type = "FLOAT" %3 = "tfl.pack"(%arg0, %arg1) {axis = 0 : i32, values_count = 2 : i32} : (tensor<1xf32>, tensor<1xf32>) -> tensor<2x1xf32> func.return } func.func @notAnnotateConst(%arg0: tensor<256x32x32x3xf32>) -> tensor<256x30x30x16xf32> {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 19 19:32:06 UTC 2023 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc
// CHECK-NEXT: %[[fc:.*]]:2 = "tfl.fully_connected"(%[[stat]], %arg1, // CHECK-NEXT: %[[stat1:.*]] = "quantfork.stats"(%[[fc]]#0) // CHECK-SAME: <{axis = 1 : i64, // CHECK-SAME: axisStats = dense<{{\[}}[-0.000000e+00, 0.000000e+00], // CHECK-SAME: [-1.000000e+00, 1.000000e+00], // CHECK-SAME: [-2.000000e+00, 2.000000e+00]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 6.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training-16bits.mlir
%input = "quantfork.stats"(%arg0) { layerStats = dense<[0.0, 1.0]> : tensor<2xf32>, axisStats = dense<[ [-1.0, 1.0], [-8.0, 8.0], [-0.5, 0.5] ]> : tensor<3x2xf32>, axis = 2 : i64 } : (tensor<1x2x3xf32>) -> tensor<1x2x3xf32> %1 = "tfl.pseudo_const"() {value = dense<[[0.1]]> : tensor<1x1xf32>} : () -> tensor<1x1xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 26.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/unfreeze_constants.mlir
// CHECK-DAG: %[[CST_1:.*]] = "tf.Const"() <{{{.*value = dense<5.000000e\+00> : tensor<4xf32>.*}}}> // CHECK-DAG: %[[AXIS:.*]] = "tf.Const"() <{{{.*value = dense<0> : tensor<i64>.*}}}> // CHECK-DAG: %[[CONCAT:.*]] = "tf.ConcatV2"(%[[READ_VAR_0]], %[[CST_1]], %[[AXIS]]) // CHECK: return %[[CONCAT]] : tensor<12xf32> } // ----- // Tests a case where the ConstOp's location is a fused loc containing more
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 17.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// quantization for the quantized kernel. If the quantized dimension // changes, the following logic no longer works as the same `params` // shouldn't be used for both input and output quantization params. // E.g. During TransposeOp's quantization propagation in // PrepareQuantize, if the quantization is per-axis and the
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
}; #include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc" // If the weight is applicable to dynamic range quantization, insert Quantize // and Dequantize ops with either per-axis or per-tensor scale. class PrepareDynamicRangeQuantizableOp : public OpRewritePattern<arith::ConstantOp> { public: explicit PrepareDynamicRangeQuantizableOp(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize.mlir
%axis = arith.constant dense<2> : tensor<i32> %res_ff = "tfl.cumsum"(%arg, %axis) {exclusive = false, reverse = false} : (tensor<1x2x1x3xf32>, tensor<i32>) -> tensor<1x2x1x3xf32> // Eliminated %res_ft = "tfl.cumsum"(%arg, %axis) {exclusive = false, reverse = true} : (tensor<1x2x1x3xf32>, tensor<i32>) -> tensor<1x2x1x3xf32> // Eliminated
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 284.1K bytes - Viewed (0)