- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 2,493 for OP (0.03 sec)
-
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
// Checks if an op is inside a lifted function. // If the given op pointer is a nullptr, returns false. bool IsInLiftedFunc(Operation* op); // Checks if the op is inside a StableHLO op with region. // If the given op pointer is a nullptr, returns false. bool IsInStableHloOpRegion(Operation* op); // Checks if a given einsum op is supported for XlaDotV2 quantization.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/README.md
new op is presented as a single node in the graph, thus optimization passes and kernels can easily be specialized to this op for better performance. * *Automatic shape/type inference support*: No shape functions are required for the new op; * *Automatic gradient support (WIP)*: The user doesn't need to author a gradient function of the op for training. ### Use Cases
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
// quantization. For dynamic range quantizable ops, it refers to the op // specification for checking the support. For custom ops, it checks the // provided map. bool hasInt8QuantizableOperandAt(Operation* op, int operand_index) const { if (visited_nonquantizable_ops_->contains(op)) { return false; } if (auto custom_op = llvm::dyn_cast_or_null<CustomOp>(op)) { std::string op_name = custom_op.getCustomCode().str();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc
return true; }; return RewriteOp(op, rewriter, get_broadcasted_shape); } template <typename Op> LogicalResult ConvertResultsBroadcastableShapeOp::RewriteEqOp( Operation* op, PatternRewriter& rewriter) const { auto eq_op = llvm::dyn_cast_or_null<Op>(op); if (eq_op && eq_op.getIncompatibleShapeError()) return RewriteOp(op, rewriter, OpTrait::util::getBroadcastedShape); return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc
tensorflow::tpu::TPUCompileMetadataProto* metadata) { auto input_shardings = op->getAttrOfType<ArrayAttr>(tensorflow::kInputShardingAttr); if (!input_shardings) return op.emitOpError( CreateMissingAttributeMsg(tensorflow::kInputShardingAttr)); if (input_shardings.size() != op.getNumOperands()) return op.emitOpError( llvm::formatv(kBadArrayAttrLengthMsg, tensorflow::kInputShardingAttr,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/control_flow_v1.pbtxt
} node { name: "cond/Switch" op: "Switch" input: "Placeholder_1" input: "Placeholder_1" attr { key: "T" value { type: DT_BOOL } } } node { name: "cond/switch_t" op: "Identity" input: "cond/Switch:1" attr { key: "T" value { type: DT_BOOL } } } node { name: "cond/switch_f" op: "Identity" input: "cond/Switch" attr {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 23 21:23:31 UTC 2020 - 3.6K bytes - Viewed (0) -
src/cmd/vendor/golang.org/x/arch/x86/x86asm/gnu.go
switch inst.Op { case FDIV: inst.Op = FDIVR case FDIVR: inst.Op = FDIV case FSUB: inst.Op = FSUBR case FSUBR: inst.Op = FSUB case FDIVP: inst.Op = FDIVRP case FDIVRP: inst.Op = FDIVP case FSUBP: inst.Op = FSUBRP case FSUBRP: inst.Op = FSUBP } } case MOVNTSD:
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Oct 19 23:33:33 UTC 2023 - 21.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
mlir::LogicalResult ReshapeOp::verify() { ReshapeOp op = *this; auto error_handler = [&op](const llvm::Twine& message) -> LogicalResult { return op.emitOpError() << message; }; TensorType expected_ty; if (failed(GetReshapeOutputType(op.getInput(), op.getShape(), error_handler, expected_ty))) return failure(); auto output_ty = op.getType().dyn_cast<RankedTensorType>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/mark_ops_for_outside_compilation.cc
if (op.getDialect() != tf_dialect) return true; // Assert has a legalization that later removes it so we don't want to outside // compile it ever for performance reasons. if (llvm::isa<mlir::TF::AssertOp>(op)) return true; if (HasStringOperand(op)) return false; if (HasStringResult(op)) return false; if (MatchesPattern(op, supported_ops)) return true; auto abstractOp = op.getRegisteredInfo();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 21.4K bytes - Viewed (0) -
tensorflow/c/eager/custom_device_test.cc
std::unique_ptr<TFE_Op, decltype(&TFE_DeleteOp)> op( TFE_NewOp(context.get(), "VarHandleOp", status.get()), TFE_DeleteOp); ASSERT_TRUE(TF_GetCode(status.get()) == TF_OK) << TF_Message(status.get()); TFE_OpSetAttrType(op.get(), "dtype", TF_FLOAT); TFE_OpSetAttrShape(op.get(), "shape", {}, 0, status.get()); TFE_OpSetAttrString(op.get(), "container", "", 0); TFE_OpSetAttrString(op.get(), "shared_name", "", 0);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 27 23:39:24 UTC 2020 - 18.4K bytes - Viewed (0)