Results 1 - 10 of 46 for OP (0.06 sec)
tensorflow/compiler/mlir/lite/stablehlo/transforms/uniform_quantized_stablehlo_to_tfl_pass.cc
LogicalResult match(stablehlo::ReshapeOp op) const override {
  return success(IsOpFullyQuantized(op));
}
void rewrite(stablehlo::ReshapeOp op, PatternRewriter& rewriter) const override {
  rewriter.replaceOpWithNewOp<TFL::ReshapeOp>(
      op, op.getOperand(),
      CreateI32ShapeConstantOp(op.getResult().getType(), op->getLoc(),
                               rewriter));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Apr 22 09:00:19 UTC 2024 - 99.8K bytes - Viewed (0)
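This hit shows MLIR's two-phase pattern idiom: a side-effect-free match that decides whether the pattern applies, followed by a rewrite that performs the replacement. Below is a minimal, framework-free C++ sketch of that split; Op, Pattern, and ReshapeLowering are hypothetical stand-ins, not MLIR's actual classes.

// Framework-free sketch of the two-phase pattern above: match (a cheap,
// side-effect-free predicate) runs first; rewrite (the mutation) runs only
// if match succeeded. All names here are illustrative, not MLIR's API.
#include <iostream>
#include <string>
#include <vector>

struct Op {
  std::string name;
  bool fully_quantized = false;
};

struct Pattern {
  virtual ~Pattern() = default;
  virtual bool match(const Op& op) const = 0;  // must not mutate anything
  virtual void rewrite(Op& op) const = 0;      // only called after match
};

struct ReshapeLowering : Pattern {
  bool match(const Op& op) const override {
    return op.name == "stablehlo.reshape" && op.fully_quantized;
  }
  void rewrite(Op& op) const override {
    op.name = "tfl.reshape";  // replace with the target-dialect op
  }
};

int main() {
  std::vector<Op> ops = {{"stablehlo.reshape", true}, {"stablehlo.add", true}};
  ReshapeLowering pattern;
  for (Op& op : ops)
    if (pattern.match(op)) pattern.rewrite(op);
  for (const Op& op : ops) std::cout << op.name << "\n";  // tfl.reshape, stablehlo.add
}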
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
LogicalResult ConcatOffsetOp::verify() {
  ConcatOffsetOp op = *this;
  if (op.getN() < 2)
    return op.emitOpError() << "requires N to be at least 2, got " << op.getN();
  if (op.getShape().size() != op.getOffset().size())
    return op.emitOpError()
           << "requires sizes of shapes and offsets to be the same, got sizes "
           << op.getShape().size() << " and " << op.getOffset().size();
  auto ranked_dim =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 146.7K bytes - Viewed (0)
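The verifier follows the usual early-return idiom: check each structural invariant in order and report the first violation with a descriptive message. A self-contained sketch of the same checks, with ConcatOffsetLike as a hypothetical stand-in for the op:

// Minimal sketch of the verification idiom above, outside MLIR: validate
// each invariant up front and return the first violation as an error.
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct ConcatOffsetLike {
  int n;                             // number of inputs being concatenated
  std::vector<int> shapes, offsets;  // one entry per input
};

std::optional<std::string> Verify(const ConcatOffsetLike& op) {
  if (op.n < 2)
    return "requires N to be at least 2, got " + std::to_string(op.n);
  if (op.shapes.size() != op.offsets.size())
    return "requires sizes of shapes and offsets to be the same, got sizes " +
           std::to_string(op.shapes.size()) + " and " +
           std::to_string(op.offsets.size());
  return std::nullopt;  // all invariants hold
}

int main() {
  if (auto err = Verify({1, {4, 4}, {0, 4}})) std::cout << *err << "\n";
}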
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
mlir::LogicalResult ReshapeOp::verify() {
  ReshapeOp op = *this;
  auto error_handler = [&op](const llvm::Twine& message) -> LogicalResult {
    return op.emitOpError() << message;
  };
  TensorType expected_ty;
  if (failed(GetReshapeOutputType(op.getInput(), op.getShape(), error_handler,
                                  expected_ty)))
    return failure();
  auto output_ty = op.getType().dyn_cast<RankedTensorType>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0)
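GetReshapeOutputType is doing shape inference for the reshape: the requested shape may contain a single -1 wildcard, which must be resolvable so that input and output element counts agree. A rough, self-contained sketch of that computation (the real TFL helper also handles dynamic dimensions and more):

// Sketch of reshape output-shape inference: resolve an optional -1 wildcard
// in the requested shape and check that element counts match. InferReshape
// is a hypothetical simplification of GetReshapeOutputType.
#include <iostream>
#include <optional>
#include <vector>

std::optional<std::vector<long>> InferReshape(const std::vector<long>& in,
                                              std::vector<long> requested) {
  long in_elems = 1;
  for (long d : in) in_elems *= d;
  long known = 1;
  int wildcard = -1;
  for (int i = 0; i < (int)requested.size(); ++i) {
    if (requested[i] == -1) {
      if (wildcard != -1) return std::nullopt;  // at most one -1 allowed
      wildcard = i;
    } else {
      known *= requested[i];
    }
  }
  if (wildcard != -1) {
    if (known == 0 || in_elems % known != 0) return std::nullopt;
    requested[wildcard] = in_elems / known;  // infer the missing dimension
    known *= requested[wildcard];
  }
  if (known != in_elems) return std::nullopt;  // element counts must match
  return requested;
}

int main() {
  auto out = InferReshape({2, 3, 4}, {6, -1});  // -> {6, 4}
  if (out)
    for (long d : *out) std::cout << d << " ";
}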
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
LogicalResult ShapeNOp::verify() {
  ShapeNOp op = *this;
  const size_t num_tensors = op.getN();
  if (op.getNumOperands() != num_tensors)
    return op.emitOpError() << "requires " << num_tensors << " operand(s), got "
                            << op.getNumOperands() << " operand(s)";
  if (op.getNumResults() != num_tensors)
    return op.emitOpError() << "requires " << num_tensors << " result(s), got "
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 22:07:10 UTC 2024 - 170.8K bytes - Viewed (0)
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
}
// Returns true if the op creates a TensorList.
bool IsTensorListInitOp(Operation* op) {
  return isa<TensorListReserveOp>(op) || isa<EmptyTensorListOp>(op) ||
         isa<TensorListFromTensorOp>(op);
}
// Returns the `element_shape` operand of the ops that create a TensorList.
Value GetElementShapeOperand(Operation* op) {
  if (auto empty_tl = dyn_cast<EmptyTensorListOp>(op))
    return empty_tl.getElementShape();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0)
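These two helpers classify TensorList-creating ops and then dispatch on the matched kind to locate the element_shape operand. A framework-free sketch of the same shape, with OpKind and Node as hypothetical stand-ins for MLIR's Operation (operand positions below are illustrative):

// Sketch of the classify-then-dispatch pattern above: recognize the handful
// of "init" kinds, then pull the shape operand out of whichever matched.
#include <iostream>
#include <vector>

enum class OpKind { TensorListReserve, EmptyTensorList, TensorListFromTensor, Other };

struct Node {
  OpKind kind;
  std::vector<int> operands;  // operand slots, as indices into a value table
};

bool IsTensorListInitOp(const Node& n) {
  return n.kind == OpKind::TensorListReserve ||
         n.kind == OpKind::EmptyTensorList ||
         n.kind == OpKind::TensorListFromTensor;
}

// Each init kind stores element_shape at a different operand position.
int GetElementShapeOperand(const Node& n) {
  switch (n.kind) {
    case OpKind::TensorListReserve:    return n.operands[0];
    case OpKind::EmptyTensorList:      return n.operands[0];
    case OpKind::TensorListFromTensor: return n.operands[1];
    default:                           return -1;
  }
}

int main() {
  Node n{OpKind::EmptyTensorList, {7, 8}};
  if (IsTensorListInitOp(n)) std::cout << GetElementShapeOperand(n) << "\n";  // 7
}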
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc
  }
  return failure();
}
// Walks up the op and ignores all preceding ops of type Tys.
// Returns the first producer op whose type is not in Tys.
template <typename... Tys>
Value recursivelyWalkUp(Value op) const {
  while (llvm::isa_and_nonnull<Tys...>(op.getDefiningOp())) {
    Operation* producer = op.getDefiningOp();
    op = producer->getOperand(/*idx=*/0);
  }
  return op;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 154.9K bytes - Viewed (0)
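recursivelyWalkUp climbs the use-def chain, skipping any producer whose type is in the Tys pack and returning the first value defined outside that set. A self-contained sketch of the same traversal over a toy node graph (Kind and Node are hypothetical):

// Sketch of the walk-up above: follow a value to its defining node and keep
// climbing through "transparent" kinds, returning the first producer that
// is not in the skip set.
#include <initializer_list>
#include <iostream>

enum class Kind { Reshape, Transpose, Const, Other };

struct Node {
  Kind kind;
  Node* operand0 = nullptr;  // producer of this node's first input
};

bool InSkipSet(Kind k, std::initializer_list<Kind> skip) {
  for (Kind s : skip)
    if (s == k) return true;
  return false;
}

// Mirrors recursivelyWalkUp: while the current node's kind is in `skip`,
// step to the producer of its first operand.
Node* WalkUp(Node* n, std::initializer_list<Kind> skip) {
  while (n && n->operand0 && InSkipSet(n->kind, skip))
    n = n->operand0;
  return n;
}

int main() {
  Node c{Kind::Const}, t{Kind::Transpose, &c}, r{Kind::Reshape, &t};
  Node* producer = WalkUp(&r, {Kind::Reshape, Kind::Transpose});
  std::cout << (producer == &c) << "\n";  // 1: skipped reshape and transpose
}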
src/cmd/vendor/github.com/ianlancetaylor/demangle/ast.go
	}
}
if op != nil {
	ps.writeString(op.Name)
	if ps.llvmStyle && op.Name == "noexcept" {
		ps.writeByte(' ')
	}
} else if c, ok := u.Op.(*Cast); ok {
	ps.startScope('(')
	ps.print(c.To)
	ps.endScope(')')
} else {
	ps.print(u.Op)
}
if !u.Suffix {
	isDelete := op != nil && (op.Name == "delete " || op.Name == "delete[] ")
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Fri May 31 19:48:28 UTC 2024 - 105.8K bytes - Viewed (0)
src/cmd/compile/internal/ssa/_gen/generic.rules
(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x))
(Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 <t> z x))
(Add16 (Add16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 <t> z x))
(Add8  (Add8  i:(Const8  <t>) z) x) && (z.Op != OpConst8  && x.Op != OpConst8)  => (Add8  i (Add8  <t> z x))
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu May 16 22:21:05 UTC 2024 - 135.3K bytes - Viewed (0)
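Each of these rules reassociates an addition so a constant addend floats outward: (c + z) + x becomes c + (z + x), with the side conditions ensuring z and x are not themselves constants (those cases are covered by plain constant folding). Once two constants meet at the same level, a separate folding rule can combine them. A sketch of the rewrite on a toy expression tree (Expr is a hypothetical stand-in for the SSA value graph):

// Sketch of what these rules accomplish: rewrite (c + z) + x as c + (z + x)
// so constants bubble toward the root, where they can eventually fold.
#include <iostream>

struct Expr {
  bool is_const;
  long value = 0;                       // valid when is_const
  Expr *lhs = nullptr, *rhs = nullptr;  // valid for adds
};

// (Add (Add c z) x) with c constant and z, x non-constant
// => (Add c (Add z x)), mirroring the generic.rules pattern.
Expr* Reassociate(Expr* add) {
  Expr* inner = add->lhs;
  if (!inner || inner->is_const || !inner->lhs || !inner->lhs->is_const ||
      inner->rhs->is_const || add->rhs->is_const)
    return add;  // pattern does not match
  Expr* c = inner->lhs;
  inner->lhs = inner->rhs;  // reuse inner as (Add z x)
  inner->rhs = add->rhs;
  add->lhs = c;             // outer add becomes (Add c (Add z x))
  add->rhs = inner;
  return add;
}

int main() {
  Expr c{true, 5}, z{false}, x{false};
  Expr inner{false, 0, &c, &z};      // (5 + z)
  Expr outer{false, 0, &inner, &x};  // (5 + z) + x
  Reassociate(&outer);
  std::cout << (outer.lhs == &c && outer.rhs->lhs == &z && outer.rhs->rhs == &x)
            << "\n";  // 1
}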
tensorflow/compiler/mlir/lite/flatbuffer_export.cc
}
// CustomTfOp is just a wrapper around a TF op; we export the custom op,
// not the wrapper, so we fetch the op from the region.
if (auto custom_op = dyn_cast<mlir::TFL::CustomTfOp>(inst)) {
  // If we have a custom op with a region, then use the first op in the
  // region, if it exists; otherwise just use params for the custom op.
  if (!custom_op.getBody().empty()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 21:41:49 UTC 2024 - 164.5K bytes - Viewed (0)
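The export path looks through the wrapper: when a CustomTfOp has a non-empty region, the first op of that region is what actually gets serialized. A tiny sketch of that unwrapping, with Node as a hypothetical stand-in for the op:

// Sketch of the unwrapping step above: for a wrapper node with a non-empty
// body, export the first op of the body instead of the wrapper itself.
#include <iostream>
#include <string>
#include <vector>

struct Node {
  std::string name;
  std::vector<Node> body;  // region contents; empty for ordinary ops
};

// Mirrors the logic above: prefer the first op in the region, if any.
const Node& OpToExport(const Node& n) {
  if (n.name == "tfl.custom_tf" && !n.body.empty()) return n.body.front();
  return n;
}

int main() {
  Node wrapped{"tfl.custom_tf", {{"tf.SomeCustomKernel", {}}}};
  std::cout << OpToExport(wrapped).name << "\n";  // tf.SomeCustomKernel
}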
src/cmd/internal/obj/s390x/asmz.go
		args[2+i] = a.Class - 1
	}
	// Lookup op in optab.
	ops := oprange[p.As&obj.AMask]
	cmp := [len(args)]*[C_NCLASS]bool{}
	for i := range cmp {
		cmp[i] = &xcmp[args[i]]
	}
	for i := range ops {
		op := &ops[i]
		if cmp[0][op.a1] && cmp[1][op.a2] && cmp[2][op.a3] &&
			cmp[3][op.a4] && cmp[4][op.a5] && cmp[5][op.a6] {
			p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
			return op
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Apr 16 17:46:09 UTC 2024 - 176.7K bytes - Viewed (0)
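The assembler picks an encoding by table lookup: each optab entry records the operand class required in every slot, and the precomputed xcmp matrix answers whether an operand's actual class is acceptable where a given class is required. A much-reduced C++ sketch of that scheme (class names and sizes are hypothetical; the real table checks six operand slots):

// Sketch of the optab lookup above: xcmp[have][want] says whether an
// operand of class `have` may fill a slot requiring class `want`.
#include <iostream>

constexpr int kNumClasses = 4;  // hypothetical; s390x uses C_NCLASS
bool xcmp[kNumClasses][kNumClasses] = {};

struct Optab {
  int a1, a2;  // required classes for operands 1 and 2 (real code has a1..a6)
  int opcode;
};

// Returns the first encoding whose required classes accept the actual ones.
int Lookup(const Optab* ops, int n, int c1, int c2) {
  for (int i = 0; i < n; ++i)
    if (xcmp[c1][ops[i].a1] && xcmp[c2][ops[i].a2]) return ops[i].opcode;
  return -1;  // no encoding accepts these operand classes
}

int main() {
  enum { NONE, REG, IMM_SMALL, IMM_LARGE };
  for (int i = 0; i < kNumClasses; ++i) xcmp[i][i] = true;
  xcmp[IMM_SMALL][IMM_LARGE] = true;  // a small immediate fits a large slot
  Optab ops[] = {{REG, IMM_SMALL, 1}, {REG, IMM_LARGE, 2}};
  std::cout << Lookup(ops, 2, REG, IMM_LARGE) << "\n";  // 2: needs wide form
}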