- Sort: Score
- Results per page: 10
- Languages: All
Results 21 - 30 of 52 for isF32 (0.2 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/fold_constant_transpose.cc
if (!const_op) return failure(); // Only support float tensors. auto tensor_type = mlir::dyn_cast_or_null<TensorType>(const_op.getType()); if (!tensor_type || !tensor_type.getElementType().isF32()) { return failure(); } return success( mlir::isa_and_nonnull<DenseFPElementsAttr>(const_op.getValue())); } void rewrite(mlir::stablehlo::TransposeOp op,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc
// Non-float tensors do not need quantization. QuantizationUnits quantizable_ops; const ShapedType type = mlir::dyn_cast<ShapedType>(op.getType()); if (!type || !type.getElementType().isF32()) return quantizable_ops; const Value value = op.getResult(); for (OpOperand& use : value.getUses()) { Operation* user = use.getOwner(); const int operand_num = use.getOperandNumber();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(20).getType())); // proj_bias is F32 EXPECT_TRUE(mlir::cast<RankedTensorType>(it->getOperand(17).getType()) .getElementType() .isF32()); // output gate bias is 0 since it is out of bounds of the bias tensor, so // we set its value as a const tensor of specified size and value 0. EXPECT_TRUE(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.td
def IsInt32ElementType : Constraint< CPred<"getElementTypeOrSelf($0).isInteger(32)">>; // Checks if the value has the type of float32. def IsF32ElementType : Constraint< CPred<"getElementTypeOrSelf($0).isF32()">>; // Checks if the value has the type of bfloat16. def IsBF16ElementType : Constraint< CPred<"getElementTypeOrSelf($0).isBF16()">>; // Checks if the value has the type of UniformQuantizedType.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 04:55:44 UTC 2024 - 6.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
QuantizationUnits& quantizable_ops) const { // Non-float tensors do not need quantization. auto type = mlir::dyn_cast<ShapedType>(op.getType()); if (!type || !type.getElementType().isF32()) return false; Value value = op.getResult(); // Check whether dynamic range quantization can be applied. for (auto& use : value.getUses()) { Operation* user = use.getOwner();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/odml_converter/transforms/outline_composites.cc
auto cst_op = llvm::dyn_cast_or_null<stablehlo::ConstantOp>(cst_input); if (!cst_op) return false; ElementsAttr value = cst_op.getValue(); if (!value.isSplat()) return false; if (!value.getElementType().isF32()) return false; return std::abs(value.getSplatValue<float>() - val) < kTolerance; } // Determines if the given op is semantically that of the gauss error function. bool MatchERF(Operation* op) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 9.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
if (op->getNumResults() != 1) { return failure(); } auto type = mlir::cast<TensorType>(op->getResult(0).getType()); if (!type || !type.getElementType().isF32()) { return failure(); } return success( op->hasOneUse() && IsWeightQuantizableFunction(*op->getUses().begin(), type.getRank())); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
// doesn't require quantization. auto tensor_type = mlir::dyn_cast<TensorType>(value.getType()); if (!tensor_type) { // There are none type values. return; } if (!tensor_type.getElementType().isF32()) return; // If the result is consumed by a quantize op, it has been quantized. if (value.hasOneUse() && llvm::isa<TFL::QuantizeOp>(*value.getUsers().begin())) return;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
dyn_cast_or_null<DequantizeOpT>(operand.getDefiningOp())) { is_operand_or_result_modified = true; inputs.push_back(dq_op.getOperand()); } else if (!ele_type.isF32()) { // If the operand is an integer tensor, then it doesn't require the // DequantizeOp in the pattern. inputs.push_back(operand); } else { return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
for (OpOperand &input : op->getOpOperands()) { Type element_type = getElementTypeOrSelf(input.get().getType()); // Non-float cases won't be calibrated. if (!element_type.isF32()) { continue; } // Skip when there is any already existing CustomAggregatorOp found. Operation *defining_op = input.get().getDefiningOp();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 14.8K bytes - Viewed (0)