- Sort Score
- Result 10 results
- Languages All
Results 21 - 30 of 47 for Axis (0.03 sec)
-
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization.td
">::Impl")>; // Specify the operand index of the coefficient operand for an affine op // and also the quantization dimension if per-axis quantization is support. // If the quantization dimension is -1, per-axis quantization isn't supported. class AffineOpCoefficient<int dim, int index> : NativeOpTrait< !strconcat("quant::AffineOpCoefficient<", !interleave([dim, index], ", "),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/reduce.h
Value operand = reduce_op.getInputs().front(); int64_t axis = reduce_op.getDimensions().getValues<int64_t>()[0]; auto dim_type = RankedTensorType::get({1}, rewriter.getI32Type()); auto reduction_indices = rewriter.create<arith::ConstantOp>( reduce_op.getLoc(), dim_type, rewriter.getI32TensorAttr({static_cast<int32_t>(axis)})); // Generate a Max and an ArgMax, as the mhlo op returns both while in TF
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/ir/QuantOps.cc
return emitOpError("layerStats must have shape [2]"); } } // Verify axisStats (optional) attribute. if (getAxisStats()) { if (!getAxis()) return emitOpError("axis must be specified for axisStats"); auto shape = tensorArg.getShape(); auto argSliceSize = std::accumulate(std::next(shape.begin(), *getAxis()), shape.end(), 1,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.cc
return emitOpError("layerStats must have shape [2]"); } } // Verify axisStats (optional) attribute. if (getAxisStats()) { if (!getAxis()) return emitOpError("axis must be specified for axisStats"); auto shape = tensorArg.getShape(); auto argSliceSize = std::accumulate(std::next(shape.begin(), *getAxis()), shape.end(), 1,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_custom_aggregation_op_to_quant_stats.cc
{static_cast<float>(min.getValueAsDouble()), static_cast<float>(max.getValueAsDouble())}); ElementsAttr axis_stats; IntegerAttr axis; quantfork::StatisticsOp stats_op = rewriter.create<quantfork::StatisticsOp>( op->getLoc(), op.getInput(), layer_stats, axis_stats, axis); op.getOutput().replaceAllUsesWith(stats_op.getResult()); return success(); } };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h
} }; }; // The trait to specify the operand index of the coefficient for an affine op // and also the quantization dimension if per-axis quantization is supported. // If the quantization dimension is -1, per-axis quantization isn't supported. // // class Conv2DOp // : public Op<Conv2DOp, OpTrait::quant::AffineOpCoefficient<0>::Impl> // template <int QuantDim, int OperandIndex = 1>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 05 07:39:40 UTC 2024 - 5.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/quantization.mlir
%cst = "tfl.pseudo_qconst"() {qtype = tensor<1x2x!quant.uniform<u8:f32, 1.0>>, value = dense<-76> : tensor<1x2xi8>} : () -> tensor<1x2x!quant.uniform<u8:f32, 1.0>> %2 = "tfl.concatenation"(%1, %cst) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1x2x!quant.uniform<u8:f32, 1.0>>, tensor<1x2x!quant.uniform<u8:f32, 1.0>>) -> tensor<2x2x!quant.uniform<u8:f32, 1.0>>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/uniform_quantized_types.h
} // Returns true iff `type` is a uniform quantized type whose storage type is // 8-bit integer and expressed type is f32. bool IsI8F32UniformQuantizedType(Type type); // Returns true iff `type` is a uniform quantized per-axis (per-channel) type // whose storage type is 8-bit integer and expressed type is f32. bool IsI8F32UniformQuantizedPerAxisType(Type type); // Returns true iff `type` is a uniform quantized type whose storage type is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py
train_step, args=(dist_inputs,)) accuracy = strategy.reduce( tf.distribute.ReduceOp.MEAN, per_replica_accuracy, axis=None) loss_value = strategy.reduce( tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None) return accuracy, loss_value iterator = iter(ds_train) accuracy = 0.0 for step in range(flags.FLAGS.train_steps):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 20 03:05:18 UTC 2021 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias}; } } else if (function_name.contains("gather")) { // Note that gather has axis attribute that specifies channel axis. spec->coeff_op_quant_dim[0] = -1; } for (auto quantizable_operand : spec->coeff_op_quant_dim) { spec->quantizable_operands.insert(quantizable_operand.first); } }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0)