- Sort Score
- Result 10 results
- Languages All
Results 41 - 50 of 522 for OP (0.02 sec)
-
tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h
// Returns the entry function name for the given tf.XlaCallModule op. Returns // empty string if such attribute does not exist. StringRef GetEntryFunctionName(TF::XlaCallModuleOp op); // Checks whether the given op contains QuantizationTrait::FullyQuantizable. inline bool HasQuantizableTrait(Operation* op) { return op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) && op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() ==
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_device.cc
rewriter.replaceOpWithNewOp<tf_device::ReturnOp>(return_op, cluster_vals); // Rewrite the cluster op. rewriter.setInsertionPoint(op); auto new_op = rewriter.create<tf_device::ClusterOp>( op->getLoc(), new_return.getOperandTypes(), op->getOperands(), op->getAttrs()); rewriter.inlineRegionBefore(op.getBodyRegion(), new_op.getBodyRegion(), new_op.getBodyRegion().end()); int idx = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tensorlist.cc
LogicalResult matchAndRewrite(TF::TensorListPopBackOp op, PatternRewriter& rewriter) const override { // It is currently not possible to easily pack the output of a multi-result // op into an op with a single variadic output in `.td`. auto converted = rewriter.create<TFL::CustomOp>( op->getLoc(), op->getResultTypes(), op->getOperands(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 23:04:40 UTC 2024 - 10.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
int quantize_operand_num = quant_op.second; Type expressed_type = op.getResult().getType(); Type cast_type = quant_type.castFromExpressedType(expressed_type); // Insert DQ-op if it does not exist yet. Otherwise, just rewire without // creating a new DQ-op. for (auto connected_op : op->getUsers()) { auto q_op = llvm::dyn_cast_or_null<quantfork::QuantizeCastOp>(connected_op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/jit/build_xla_ops_pass_test.cc
auto xla_compile = NodeWith(Op("_XlaCompile"), Attr("must_compile", false)); auto predicated_compilation_key = NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile))); auto xla_run = NodeWith(Op("_XlaRun"), Inputs(Out(1, predicated_compilation_key))); auto tf_call = NodeWith(Op("StatefulPartitionedCall"), CtrlDeps(NodeWith(Op("Identity"),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc
parsed_op_name, attrs, node_def))) { return failure(); } if (parsed_op_name != tf_op_name) { return op.emitOpError( "TF op names in 'custom_code' and 'custom_option' don't match"); } const tensorflow::OpDef* op_def; // This will fail only if the op is not a registered TensorFlow op.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/c/eager/c_api_test_util.cc
TFE_OpSetAttrType(op, "T", TFE_TensorHandleDataType(in)); TFE_OpSetAttrInt(op, "group_size", group_size); TFE_OpSetAttrInt(op, "group_key", 123); TFE_OpSetAttrInt(op, "instance_key", 456); TFE_OpSetAttrString(op, "merge_op", "Add", 3); TFE_OpSetAttrString(op, "final_op", "Id", 2); std::vector<int64_t> subdiv_offsets; TFE_OpSetAttrIntList(op, "subdiv_offsets", subdiv_offsets.data(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 22:37:46 UTC 2024 - 23.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc
} } if (skip_cpu) { for (auto& op : block) { auto op_device = GetInferenceDeviceTypeForOp(&op); if (op_device_is(op, kCpuDeviceName)) // The recently raised func is device type cpu & `op` is a "CPU". // Recursively call again to raise any non-"CPU" subgraphs contained // within nested region of `op`. for (auto& region : op.getRegions())
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.4K bytes - Viewed (0) -
src/cmd/compile/internal/ssa/schedule.go
if b != f.Entry { f.Fatalf("%s appeared outside of entry block, b=%s", v.Op, b.String()) } score[v.ID] = ScorePhi case v.Op == OpArg || v.Op == OpSP || v.Op == OpSB: // We want all the args as early as possible, for better debugging. score[v.ID] = ScoreArg case v.Op == OpInitMem: // Early, but after args. See debug.go:buildLocationLists score[v.ID] = ScoreInitMem
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Mon Apr 08 15:53:17 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
}; bool IsActivationFunction(Operation *op) { return isa<EluOp, ReluOp, Relu6Op>(op); } // Finds and returns an activation op that uses the result of `op`. If there are // multiple such activations, one is returned (with no guarantee as to which // one). If there are no activation functions that use the output, returns // nullptr. Operation *GetActivation(Value op) { for (auto &use : op.getUses()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0)