- Sort: Score
- Results per page: 10
- Languages: All
Results 81 - 90 of 2,493 for OP (0.03 sec)
-
tensorflow/compiler/mlir/lite/experimental/common/outline_operations.cc
auto update_from_op = [&](Operation* op) { been_defined.insert(op->getResults().begin(), op->getResults().end()); for (Value input : op->getOperands()) { if (been_defined.contains(input)) { continue; } results.insert(input); } }; for (Operation* op : partition_ops) { update_from_op(op); op->walk<WalkOrder::PreOrder>([&](Block* nested_block) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc
// Create a new dequantize op that is propagated. rewriter.setInsertionPointAfter(user_op); TF::PartitionedCallOp new_dequantize_op = cast<TF::PartitionedCallOp>(rewriter.clone(*original_dequantize_op)); // Skip the original dequant op and connect the op before dequantize to the // user op. user_op->setOperand(user_idx, op_before_dequantize);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/passes/quantize_weight.cc
// old ConstantOp is guaranteed to have one F32->F16 convert op regardless // of its number of users. rewriter.setInsertionPointAfter(op); // create new F16 constant op in that location ConstantOp new_const = rewriter.create<ConstantOp>( op->getLoc(), new_result_type, new_value_attr); ConvertOp dcast = rewriter.create<ConvertOp>(op->getLoc(), old_result_type, new_const);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
}) .wasInterrupted(); } // Returns whether `op` or any ancestors of `op` are outside compiled. bool HasOutsideCompilationAncestor(Operation* op) { while (op) { if (op->hasAttr(kXlaOutsideCompilationAttr)) { return true; } op = op->getParentOp(); } return false; } // Returns whether any ancestors of `op` are outside compiled.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_communication.cc
} // Checks if an op is a TF/XLA communication op. bool IsCommunicationOp(Operation* op) { return isa<TF::_XlaHostComputeMlirOp, TF::XlaSendToHostOp, TF::XlaRecvFromHostOp>(op); } // Checks if an op is a supported HLO control flow op. bool IsControlFlowOp(Operation* op) { return isa<IfOp, WhileOp>(op); } // Collects control flow op ancestors of a given op, up until FuncOp. If any
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 40.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/convert_tf_control_flow_to_scf.cc
}; ValueRange opInput = op.getInput(); TypeRange scf_block_arguments_type = opInput.getType(); // Create the `scf.while` op. auto scf_while_op = rewriter.create<scf::WhileOp>( op.getLoc(), op.getResultTypes(), opInput); // Create the `before` block of the `scf.while` op (with an `scf.condition` // op as the terminator). Note that the arguments' type of this block is
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_custom_call_to_composite.cc
auto calledComputations = op.getCalledComputations(); if (!calledComputations || calledComputations.size() != 1) return op->emitError("expected exactly one called_computation"); auto decomposition = mlir::cast<FlatSymbolRefAttr>(calledComputations[0]); auto composite = rewriter.create<mlir::stablehlo::CompositeOp>( op.getLoc(), op.getResultTypes(), op.getOperands(), name.str(), attrs, decomposition.getValue());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.4K bytes - Viewed (0) -
tensorflow/c/experimental/gradients/math_grad.h
GradientFunction* AddRegisterer(const ForwardOperation& op); GradientFunction* ExpRegisterer(const ForwardOperation& op); GradientFunction* MatMulRegisterer(const ForwardOperation& op); GradientFunction* SqrtRegisterer(const ForwardOperation& op); GradientFunction* NegRegisterer(const ForwardOperation& op); GradientFunction* SubRegisterer(const ForwardOperation& op); GradientFunction* MulRegisterer(const ForwardOperation& op);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 03 22:28:48 UTC 2020 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_head_tail_outside_compilation.cc
llvm::all_of(cluster_op.getUsers(), [&](Operation* op) { return op == terminator || tail_outside_compiled_ops_set.count(op); }); if (!can_be_extracted) continue; // Collect operands of cluster op that are generated within the cluster. // These values should be returned by the cluster. cluster_op.walk([&](Operation* op) { for (Value operand : op->getOperands()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 20.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/internal/passes/verify_input_dialect_to_executor_pass.cc
op->getDialect()->getNamespace().str() + " which is not an accepted dialect"; op->emitError() << error; return WalkResult::interrupt(); } if (IsTfDeviceClusterFuncOp(op)) { std::string error = "failed TF functional to executor validation, op " "tf_device.cluster_func is not allowed"; op->emitError() << error;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Dec 08 16:32:56 UTC 2023 - 2.7K bytes - Viewed (0)