- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 215 for TensorT (0.14 sec)
-
tensorflow/cc/experimental/libtf/value.h
new (&v.data_.dict) T(std::make_shared<T::element_type>()); return v; } /// Constructs a TaggedValue with type TENSOR. static TaggedValue Tensor(tensorflow::AbstractTensorHandle* raw_ptr) { TaggedValue v; v.type_ = TENSOR; using T = decltype(v.data_.tensor); new (&v.data_.tensor) T(raw_ptr, /*add_ref=*/false); return v; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 05:23:45 UTC 2024 - 20.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/fake_quant_per_channel_4bit.pbtxt
} # MLIR-LABEL: func @main # MLIR-SAME: (%[[ARG_0:[a-z0-9]+]]: tensor<1x1x1x256x!quant.uniform<i8:f32, 0.21632751372549019:27>>) -> tensor<1x6x31x!quant.uniform<i8:f32, 0.09363494573854933:22>> # MLIR-SAME: control_outputs = "" # MLIR-SAME: inputs = "input" # MLIR-SAME: outputs = "output" # MLIR: %[[shape:.*]] = arith.constant dense<[1, -1, 31]> : tensor<3xi32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 18.1K bytes - Viewed (0) -
tensorflow/c/kernels_experimental.cc
// copy-on-read mode is false. if (var->tensor()->RefCountIsOne()) { var->copy_on_read_mode.store(true); return absl::OkStatus(); } Tensor tmp; if (variantType) { AllocatorAttributes attr; attr.set_on_host(true); TF_RETURN_IF_ERROR(context->allocate_temp( var->tensor()->dtype(), var->tensor()->shape(), &tmp, attr)); const auto elements_in = var->tensor()->flat<Variant>();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 06:12:29 UTC 2024 - 30.9K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_device.cc
// on-device shape. This is accurate for CPU and GPU devices that neither // transpose nor pad tensors. Status DefaultPaddedShapeFn(const Tensor& tensor, xla::Shape* shape) { const tensorflow::XlaTensor* xla_tensor = tensorflow::XlaTensor::FromTensor(&tensor); if (xla_tensor == nullptr) { return TensorShapeToXLAShape(tensor.dtype(), tensor.shape(), shape); } const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon May 20 21:05:42 UTC 2024 - 24.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/passes.h
// Legalize TensorList Ops iff all of them are supported. std::unique_ptr<OperationPass<ModuleOp>> CreateLegalizeTensorListPass(); // Reduce the type precision of some tensor types if all values within that // tensor are within the range of the reduced precision. std::unique_ptr<OperationPass<ModuleOp>> CreateReduceTypePrecisionPass(); // Conservatively pushes transposes through elementwise ops to prepare
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 07 21:29:34 UTC 2024 - 10.9K bytes - Viewed (0) -
tensorflow/c/eager/c_api_experimental.h
TF_Status* status, void* device_info); // Method to copy a tensor from the custom device to a target device. TFE_TensorHandle* (*copy_tensor_from_device)(TFE_Context* context, TFE_TensorHandle* tensor,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 21 22:37:46 UTC 2024 - 39.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
} return true; } // Calculates other_tensor_zp * tensor for zero point offset calculation. // Things to do: // 1. Reduce the tensor (which is an input of XlaDotV2) with contracting // dimensions of XlaDotV2. // - The resultant dimension must match with XlaDotV2 resultant dimension // 2. Multiply it with zero point from the other tensor. // We decided to use tf.Einsum for step 1, since it would require transposes/
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize_drq.cc
// criteria for determining quantizable ops differs by the inference type. bool getQuantizableOps(arith::ConstantOp op, QuantizationUnits& quantizable_ops) const { // Non-float tensors do not need quantization. auto type = mlir::dyn_cast<ShapedType>(op.getType()); if (!type || !type.getElementType().isF32()) return false; Value value = op.getResult();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 11.5K bytes - Viewed (0) -
tensorflow/c/eager/c_api.cc
"TFE_TensorHandleDeviceMemorySize may not be called on a ", handle->TypeString(), " tensor handle."); return 0; } const tensorflow::Tensor* tensor; status->status = handle->Tensor(&tensor); if (!status->status.ok()) { return 0; } return tensor->TotalBytes(); } TFE_Op* TFE_NewOp(TFE_Context* ctx, const char* op_or_function_name,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 09 08:11:23 UTC 2024 - 44K bytes - Viewed (0) -
tensorflow/compiler/jit/kernels/xla_ops.cc
// the beginning are stripped off and the closure key is appended as the // last input. So the inputs look like: input tensors, resource variables, // closure key tensor. std::vector<const Tensor*> inputs = InputsFromContext(ctx); absl::flat_hash_map<int, const Tensor*> variable_snapshots; for (const auto& [variable_index, variable_tensor] : closure.resource_var_snapshots()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 41.4K bytes - Viewed (0)