- Sort Score
- Results per page 10 results
- Languages All
Results 21 - 30 of 101 for type_attr (0.17 sec)
-
tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc
if (matchPattern(cst_tensor_op.getArg(), m_Constant(&array))) { llvm::DenseSet<Type> all_types; for (auto it : array) { TypedAttr typed_attr = it.dyn_cast<TypedAttr>(); if (!typed_attr) return failure(); all_types.insert(typed_attr.getType()); } if (all_types.size() != 1) return failure(); ShapedType new_out_type = RankedTensorType::get(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Nov 21 16:55:41 UTC 2023 - 38.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/decompose_reduce_dataset.cc
llvm::SmallVector<Attribute, 2> shape_attrs; llvm::SmallVector<Attribute, 2> type_attrs; for (Type type : dataset_types) { shape_attrs.push_back( TF::ShapeAttr::get(builder.getContext(), mlir::cast<ShapedType>(type))); type_attrs.push_back(TypeAttr::get(getElementTypeOrSelf(type))); } auto anonymous_iterator = builder.create<AnonymousIteratorV3Op>(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/decompose.cc
} attribute = TypeAttr::get(type); } Value attr_cst; // Wrap these special attributes as a special TFR constant, so the SSA // value has a valid type to be used as TFR function argument. These // attributes are not expected to be manipulated by the lowering passes. if (mlir::isa<TypeAttr>(attribute) || mlir::isa<ArrayAttr>(attribute) ||
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.6K bytes - Viewed (0) -
tensorflow/c/kernels/bitcast_op_test.cc
Status status; NodeDef def; def.set_op("Bitcast"); def.set_device(DEVICE_CPU); AttrValue typeAttr; SetAttrValue(input_tensor->dtype(), &typeAttr); AttrValue outTypeAttr; SetAttrValue(out_type, &outTypeAttr); (*def.mutable_attr())["T"] = typeAttr; (*def.mutable_attr())["type"] = outTypeAttr; def.add_input(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Jul 18 15:10:51 UTC 2022 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/constant_utils.cc
#include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/statusor.h" namespace mlir { namespace TFL { absl::StatusOr<TypedAttr> CreateTypedAttr(ShapedType shaped_type, int value) { Type element_type = shaped_type.getElementType(); if (element_type.isF16()) { auto floatType = mlir::FloatType::getF16(element_type.getContext());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/validators.h
} /// Returns whether the given `a` and `b` have broadcast-compatible /// types. bool IsBroadcastableElementsAttrs(mlir::TypedAttr a, mlir::TypedAttr b); // Returns true if every dimension of the attribute is 1 except the last one. bool IsDimensionsDegenerateExceptLastOne(mlir::TypedAttr val); // Returns true if every element is 1 except the last one. bool IsDimensionsDegenerateExceptLastOne(ArrayRef<int64_t> elements_shape);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/device_target.cc
if (!rop) return failure(); llvm::SmallVector<Type, 4> input_specs, out_specs; for (auto spec : rop.getInputSpecs()) { input_specs.push_back(spec.cast<TypeAttr>().getValue()); } for (auto spec : rop.getOutputSpecs()) { out_specs.push_back(spec.cast<TypeAttr>().getValue()); } auto in_spec = input_specs[0].dyn_cast<UniformQuantizedType>(); // TODO(fengliuai): handles the PerAxis QuantizedType.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 08 10:41:08 UTC 2024 - 7.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/validators.cc
return !std::any_of(elements.begin(), elements.end(), [](Attribute e) { return mlir::cast<IntegerAttr>(e).getValue() != 1; }); } bool IsBroadcastableElementsAttrs(mlir::TypedAttr a, mlir::TypedAttr b) { // This would return false if we had unranked tensors (where they should // probably be considered as broadcastable), but given we are working with // attributes here that shouldn't be an issue,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/utils/lstm_utils.cc
/*asymmetric_quantize_inputs=*/mlir::BoolAttr(), /*input_to_input_intermediate=*/mlir::TypeAttr(), /*input_to_forget_intermediate=*/mlir::TypeAttr(), /*input_to_cell_intermediate=*/mlir::TypeAttr(), /*input_to_output_intermediate=*/mlir::TypeAttr(), /*effective_hidden_scale_intermediate=*/mlir::TypeAttr()); // Cast the static shaped lstm result to FuncOp's signature -
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 36.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
input_specs.push_back(original_input_specs[i]); } else if (requantize.pos == RequantizeState::ON_OUTPUT) { input_specs.push_back(TypeAttr::get(requantize.params)); } else { input_specs.push_back(TypeAttr::get(state.params)); } } op->setAttr("input_specs", ArrayAttr::get(context, input_specs)); llvm::SmallVector<Attribute, 4> output_specs;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 08 01:38:03 UTC 2024 - 13.1K bytes - Viewed (0)