- Sort Score
- Result 10 results
- Languages All
Results 21 - 30 of 175 for Operands (0.12 sec)
-
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_with_tf2xla.cc
Operation* op, ArrayRef<Value> operands, ConversionPatternRewriter& rewriter) const override { // This pattern is a conversion pattern because we want to specify a type // converter. However, this pattern still uses the original op's operands // while creating the ops so make sure there aren't any type changes between // the original op operands and the operands during the conversion.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 28 21:49:50 UTC 2024 - 9.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_arith_ops_folder.h
} else { return {}; } // Fold: Op(Operand, Identity) -> Operand. if (rhs_attr && is_valid_broadcasting(lhs_type, rhs_type, result_type)) { if (rhs_attr.isSplat() && rhs_attr.getSplatValue<Attribute>() == identity_attr) return arithmetic_op.getX(); } // Fold: Op(Identity, Operand) -> Operand for commutative operations. if (lhs_attr && is_commutative &&
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 5.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/experimental/common/outline_operations.cc
matchPattern(op, m_Constant()) || isa<QConstOp>(op); } // Pre-order traverse, adding results and BlockArgs to `been_defined` and // collecting operands not contained within `been_defined`. If we encounter an // operand that references a Value that has been defined (and added to // `been_defined`) it is guaranteed that the Value definition is not contained
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc
}; // Rewrites the given op to `OpT` op after adding the given operand at the end. template <typename OpT> OpT AddOperandAndRewriteAs(Operation* op, Value operand, NamedAttrList attr, OpBuilder* builder) { builder->setInsertionPoint(op); auto operands = llvm::to_vector<4>(op->getOperands()); operands.push_back(operand); auto new_op = builder->create<OpT>(op->getLoc(), op->getResultTypes(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 15 22:55:42 UTC 2024 - 4.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-include-tf2xla-fallback.mlir
// The fallback pattern uses dot_general without broadcast on operands and then // transposes the output which is faster. However, the fallback pattern doesn't // support dynamic shaped operands like the native lowering. Verify that // fallback lowering is preferred for static shaped operands when available. // CHECK-LABEL: batchmatmulv2
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 3.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_op_interfaces.td
let description = [{ Operation supports folding operand(s) transposes into the operation itself. (1) Operation might have layout dependent operands and results... Example: MaxPool(Transpose($arg, $perm)) -> Transpose(MaxPool($arg, $perm)) (2) ... or it might have only layout dependent operands: Example: Mean(Transpose($arg, $reduction_dims))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Nov 30 19:07:07 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/colocate_tpu_copy_with_dynamic_shape.cc
device = d->getDevice(); break; } } for (auto *operand : operands) propagateIfChanged(operand, operand->SetDevice(device)); } } void visitBranchOperand(OpOperand &operand) override {} void visitCallOperand(OpOperand &operand) override {} void setToExitState(Device *lattice) override {} };
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 23 00:30:27 UTC 2023 - 5.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/analyze_variables.cc
} // If any of the operands is a resource type, then we break // and mark the module as not valid for TFLite legalization. // Note: this might disable native variables in more than needed cases. // TODO(b/189370197): Enhance variable analysis. for (auto operand : op->getOperands()) { if (mlir::isa<TF::ResourceType>( getElementTypeOrSelf(operand.getType()))) { legalize_to_tfl = false;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 4.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_read_for_write.cc
// Update caller and function types with new read operands. auto operands = llvm::to_vector<4>(cluster_func.getOperands()); operands.append(read_operands.begin(), read_operands.end()); auto loc = cluster_func.getLoc(); auto new_cluster_func = builder.create<tf_device::ClusterFuncOp>( loc, cluster_func.getResultTypes(), operands, cluster_func->getAttrs());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 16:54:40 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/passes/passes.h
std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass( std::optional<ModuleOp> tfr_module = std::nullopt); // Rewrites quantized operands and results with their storage types. // This pass should be run at module level after decomposition, if there are // quantized operands or results. std::unique_ptr<OperationPass<ModuleOp>> CreateRewriteQuantizedIOPass(); // Raise to TF ops.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jun 08 01:19:25 UTC 2023 - 2K bytes - Viewed (0)