- Sort: Score
- Results per page: 10
- Languages: All
Results 41 - 50 of 86 for getAxes (0.16 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc
if (!type || !type.getElementType().isF32()) { return failure(); } return success( op->hasOneUse() && IsWeightQuantizableFunction(*op->getUses().begin(), type.getRank())); } // Checks if the operand is second operand of `tf.XlaCallModule` op for // `stablehlo.convolution` or `stablehlo.dot_general` with fully_quantizable // trait.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 05:56:10 UTC 2024 - 10.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
// bias is used immediately by the user. This assumption is always correct // after constant folding. bool UsedAsBias(Value value) { for (auto &use : value.getUses()) { auto biases = TFL::GetOpQuantSpec(use.getOwner())->biases_params; if (biases.find(use.getOperandNumber()) != biases.end()) return true; } return false; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 9.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
if (!type || !type.getElementType().isF32()) return false; Value value = op.getResult(); // Check whether dynamic range quantization can be applied. for (auto& use : value.getUses()) { Operation* user = use.getOwner(); int operand_num = use.getOperandNumber(); std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(user);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
auto values = tf_concat_op.getValues(); auto output_type = tf_concat_op.getOutput().getType(); // Extract axis attribute from constant axis tensor ElementsAttr axis; if (!matchPattern(tf_concat_op.getAxis(), m_Constant(&axis))) return failure(); IntegerAttr axis_int = ExtractSingleElementAsInteger(axis); // "axis" operand could be a i64 tensor. Resolve it here. IntegerAttr axis_i32;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 20:06:54 UTC 2024 - 45.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc
} std::optional<RankedTensorType> GetElementTypeFromAccess( Value collection, ModuleOp module, llvm::function_ref<std::optional<Type>(Operation*)> infer_from_op) { for (auto& use : collection.getUses()) { if (auto while_op = llvm::dyn_cast<TF::WhileOp>(use.getOwner())) { auto body = while_op.body_function(); assert(body); auto type_from_body = GetElementTypeFromAccess(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 14.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
// updated in the function. llvm::SmallSet<int, 4> indexes; for (BlockArgument &arg : func.getArguments()) { if (tensor_list_args.contains(arg.getArgNumber())) { for (const mlir::OpOperand &use : arg.getUses()) { mlir::Operation *op = use.getOwner(); // Currently we only check if the tensorlist argument is consumed by // `TensorListPushBack` or `TensorListResize`, since those are the only
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 70.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_driver.cc
// Unsure if this check is required. if (state.pos != pos) { return; } } if (pos == RequantizeState::ON_OUTPUT) { Operation* user = value.getUses().begin().getUser(); if (isa<quantfork::QuantizeCastOp>(user)) { // The requantize op is inserted between `quantize` and `dequantize` ops. value = user->getResult(0);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 38.1K bytes - Viewed (0) -
pkg/volume/util/resize_util.go
func GenericResizeFS(host volume.VolumeHost, pluginName, devicePath, deviceMountPath string) (bool, error) { resizer := mount.NewResizeFs(host.GetExec(pluginName)) return resizer.Resize(devicePath, deviceMountPath)
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Mon Jul 17 19:30:35 UTC 2023 - 14.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
// to it have the same shape, we need to follow use-def chain of ops that // (conceptually) modify it i.e., ops that take an input TensorList and // produce an output TensorList. for (auto& use : tensorlist.getUses()) { if (auto push = llvm::dyn_cast<TensorListPushBackOp>(use.getOwner())) { auto element_type = mlir::dyn_cast<RankedTensorType>(push.getTensor().getType());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0) -
pkg/controller/volume/expand/expand_controller.go
} func (expc *expandController) GetMounter(pluginName string) mount.Interface { return nil } func (expc *expandController) GetExec(pluginName string) utilexec.Interface { return utilexec.New() } func (expc *expandController) GetHostName() string { return "" } func (expc *expandController) GetHostIP() (net.IP, error) {
Registered: Sat Jun 15 01:39:40 UTC 2024 - Last Modified: Sat May 04 18:33:12 UTC 2024 - 16.2K bytes - Viewed (0)