- Sort Score
- Result 10 results
- Languages All
Results 21 - 30 of 63 for qtype_attr (0.15 sec)
-
tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.cc
DataType dtype; TF_RETURN_IF_ERROR(ConvertToDataType(arg_type.getElementType(), &dtype)); AttrValue type_attr; type_attr.set_type(dtype); (*node_def->mutable_attr())["T"] = type_attr; AttrValue index_attr; index_attr.set_i(index); (*node_def->mutable_attr())["index"] = index_attr; if (auto device_attr =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 01 11:17:36 UTC 2024 - 35.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc
DataType dtype; TF_RETURN_IF_ERROR(ConvertToDataType(arg_type.getElementType(), &dtype)); AttrValue type_attr; type_attr.set_type(dtype); (*node_def->mutable_attr())["T"] = type_attr; AttrValue index_attr; index_attr.set_i(index); (*node_def->mutable_attr())["index"] = index_attr; if (auto device_attr =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 06 23:04:51 UTC 2024 - 35.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
// index. template <typename LstmOp> inline QuantizedType GetIntermediateElementType(LstmOp op, int tensor_index) { if (tensor_index < 0 || tensor_index > 4) return nullptr; TypeAttr attr = op->template getAttrOfType<TypeAttr>( intermediate_attributes[tensor_index]); if (!attr) { return nullptr; } return QuantizedType::getQuantizedElementType(attr.getValue()); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc
// folding logic will use an "arith.constant" op to replace the // "tf.FakeQuantWithMinMaxVarsOp", the "tfl.quantize" op is used to preserve // the quantization parameters as a TypeAttr and the "tfl.dequantize" op is used to // convert the output type to the next op. Here are the transformations: // // input min cst max cst input min cst max cst
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_attr.cc
case AttrValue::kB: return builder->getBoolAttr(value.b()); case AttrValue::kType: { mlir::Type type; TF_RETURN_IF_ERROR(ConvertDataType(value.type(), *builder, &type)); return mlir::TypeAttr::get(type); } case AttrValue::kShape: return ConvertTensorShapeProto(value.shape(), builder->getContext()); case AttrValue::kTensor: return ConvertTensorProto(value.tensor(), builder);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 4.8K bytes - Viewed (0) -
tensorflow/cc/framework/fuzzing/cc_op_fuzz_gen.cc
std::string type = "DT_UINT8"; if (arg.type() != DT_INVALID) { type = DataType_Name(arg.type()); } else if (!arg.type_attr().empty()) { OpDef_AttrDef attr = *FindAttr(arg.type_attr(), op_info.graph_op_def); if (attr.has_default_value() && attr.default_value().value_case() == AttrValue::kType) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jan 27 16:26:51 UTC 2024 - 13K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
// before the op is constant folded. Since the constant // folding logic will use an "arith.constant" op to replace the // "tf.FakeQuantWithMinMaxVarsOp", the "quant.qcast" op is used to preserve // the quantization parameters as a TypeAttr and the "quant.dcast" op is used to // convert the output type to the next op. Here are the transformations: // // input min cst max cst input // \ | | |
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
// before the op is constant folded. Since the constant // folding logic will use an "arith.constant" op to replace the // "tf.FakeQuantWithMinMaxVarsOp", the "tfl.quantize" op is used to preserve // the quantization parameters as a TypeAttr and the "tfl.dequantize" op is used to // convert the output type to the next op. Here are the transformations: // // input min cst max cst input min cst max cst
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/quantize_variables.cc
builder.setInsertionPoint(assign_variable_op); auto new_q_op = builder.create<QuantizeOp>( assign_variable_op.getLoc(), ref_qtype, dq_op.getInput(), TypeAttr::get(ref_qtype)); auto new_assign_variable_op = builder.create<AssignVariableOp>( assign_variable_op.getLoc(), assign_variable_op.getResourceId(), new_q_op.getResult());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc
if (input_types.size() <= 1) return; Type target_input_type = mlir::cast<TypeAttr>(input_types[0]).getValue(); auto result_type = UnrankedTensorType::get(target_input_type); for (auto i = 1; i < input_types.size(); ++i) { Type current_input_type = mlir::cast<TypeAttr>(input_types[i]).getValue(); if (current_input_type != target_input_type) { input_values[i] =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.8K bytes - Viewed (0)