- Sort by: Score
- Results per page: 10
- Languages: All
Results 1 - 10 of 27 for hasAttr (0.52 sec)
-
src/testing/slogtest/slogtest.go
}, checks: []check{ hasAttr("a", "b"), hasAttr("k", "v"), }, }, { name: "groups", explanation: withSource("a Handler should handle Group attributes"), f: func(l *slog.Logger) { l.Info("msg", "a", "b", slog.Group("G", slog.String("c", "d")), "e", "f") }, checks: []check{ hasAttr("a", "b"), inGroup("G", hasAttr("c", "d")), hasAttr("e", "f"), }, },
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed Nov 08 17:55:47 UTC 2023 - 9.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc
} else { shardings_result.push_back(sharding_next); } } } bool IsValidShardingTupleForArity(Operation* op) { if (!op->hasAttr(kXLAShardingAttr) && !op->hasAttr(kShardingAttr)) { return true; } std::string shard_string; if (op->hasAttr(kXLAShardingAttr)) { shard_string = op->getAttrOfType<StringAttr>(kXLAShardingAttr).strref().str(); } else {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 07 06:51:01 UTC 2024 - 21.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_program_key.cc
preprocess_op->setOperand(0, program); } LogicalResult VerifyAllProgramKeyOperandsReplaced(Operation* module) { WalkResult result = module->walk([&](Operation* op) { if (!op->hasAttr(kMiniBatchSplitsAttr) && !op->hasAttr(kMiniBatchCsrAttr)) return WalkResult::advance(); Operation* defining = op->getOperand(0).getDefiningOp(); if (llvm::dyn_cast_or_null<TF::ConstOp>(defining)) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 17.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/call_graph_util_test.cc
} func.func @func(%arg0: tensor<i32>) -> tensor<i32> { func.return %arg0 : tensor<i32> } )mlir"; auto has_compile_device_type = [](mlir::SymbolUserOpInterface op) { return op->hasAttr(tensorflow::kCompileDeviceTypeAttr); }; mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect, mlir::TF::TensorFlowDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Dec 20 04:39:18 UTC 2023 - 11.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
StringRef function_name = mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue(); if (!function_name.starts_with("composite_") || !call_op->hasAttr(kQuantTraitAttrName)) { return failure(); } absl::Status check_status; // TODO(b/270906404): Support weight-only gather for uniform quantized opset // in PTQ mode
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
<< " but got " << element_ty; }); return false; } // FusedMatMul kernel does not support grad_a/grad_b attrs if ((matmul->hasAttr("grad_a") && mlir::cast<BoolAttr>(matmul->getAttr("grad_a")).getValue()) || (matmul->hasAttr("grad_b") && mlir::cast<BoolAttr>(matmul->getAttr("grad_b")).getValue())) { (void)rewriter.notifyMatchFailure(matmul, [&](Diagnostic &diag) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_sequencing.cc
if (mlir::isa<TF::ResourceType>(tensor_type.getElementType())) { return true; } } return false; } bool IsTPUOp(mlir::Operation* op) { return op->hasAttr(TF::kReplicationInfoAttr); } StringAttr GetReplicationAttr(mlir::Operation* op) { return op->getAttrOfType<StringAttr>(TF::kReplicationInfoAttr); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 39.4K bytes - Viewed (0) -
fastapi/_compat.py
return ( _annotation_is_complex(annotation) or _annotation_is_complex(origin) or hasattr(origin, "__pydantic_core_schema__") or hasattr(origin, "__get_pydantic_core_schema__") ) def field_annotation_is_scalar(annotation: Any) -> bool: # handle Ellipsis here to make tuple[int, ...] work nicely
Registered: Mon Jun 17 08:32:26 UTC 2024 - Last Modified: Thu Apr 18 19:40:57 UTC 2024 - 22.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc
// version 8 and above. constexpr StringRef kUsesShapePolymorphismAttr = "jax.uses_shape_polymorphism"; bool IsInLiftedFunc(Operation* op) { if (op == nullptr) return false; return op->getParentOfType<func::FuncOp>()->hasAttr(kFusedFunctionAttr); } bool IsInStableHloOpRegion(Operation* op) { if (op == nullptr) return false; auto parent_op = op->getParentOp(); return parent_op != nullptr && stablehlo::IsStablehloOp(parent_op); }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 17:58:54 UTC 2024 - 21.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_tf_xla_call_module_to_stablehlo_pass.cc
PatternRewriter &rewriter) const override { // Removes the custom call with sharding op if the operand type is the // same as the result type. if (op->hasAttr(kShardingAttr) && op.getCallTargetName() == kShardingName && op.getNumOperands() == 1 && op.getNumResults() == 1 && op.getOperands().front().getType() == op.getResults().front().getType()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jan 25 09:43:18 UTC 2024 - 10.9K bytes - Viewed (0)