- Sort Score
- Result 10 results
- Languages All
Results 31 - 40 of 529 for shape (0.1 sec)
-
tensorflow/compiler/mlir/tensorflow/utils/convert_type.h
// Converts an TensorFlow shape to the one used in MLIR. void ConvertToMlirShape(const TensorShape& input_shape, llvm::SmallVectorImpl<int64_t>* shape); // Converts an TensorFlow shape proto to the one used in MLIR. Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 2.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
auto value_type = mlir::cast<RankedTensorType>(value.getType()); auto shape = value_type.getShape(); SmallVector<int64_t, 4> transposed_shape(shape.begin(), shape.end()); for (int i = 0, end = shape.size(); i < end; ++i) { transposed_shape[i] = shape[permutation[i]]; } auto transposed_type = RankedTensorType::get(transposed_shape, value_type.getElementType());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 33.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc
} // Converts the tensor shape proto into an MLIR shape attribute. absl::StatusOr<mlir::Attribute> ConvertTensorShapeProto( const TensorShapeProto& shape, mlir::MLIRContext* context) { if (shape.unknown_rank()) return mlir::TF::ShapeAttr::get(context, std::nullopt); llvm::SmallVector<int64_t, 4> dims; dims.reserve(shape.dim().size()); for (const auto& dim : shape.dim()) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 20.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
// xla::Shape shape(xla::F32, {2, 2}, {}, {}) // tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event = // tsl::MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>(stream.parent()); // done_event->Init(); // Tensor dest_cpu_tensor; // // XlaHostRecvDeviceContext device_context(&stream, gpu_dst, // shape, done_event); // device_context.CopyDeviceTensorToCPUSync(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc
llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dims()); for (const auto& d : input_shape) { shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size); } } Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dim_size());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc
} // Helper method that given 'shape' and 'current_index' representing // index in broadcasted tensor, get the index in the flat original tensor. // 'shape' is computed from the original shape and the broadcast dimensions to // match result shape. int64_t GetElementIndex(llvm::SmallVectorImpl<int64_t> &shape, llvm::SmallVectorImpl<int64_t> &current_index) { int64_t ind = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/ops/variable_ops.cc
TF_RETURN_IF_ERROR(varhandle_op->SetAttrType("dtype", dtype)); // Note that if shape is unknown rank, shape.dim_sizes() will be empty, and // shape.dims() will be -1. absl::InlinedVector<int64_t, 4UL> dim_sizes = shape.dim_sizes(); TF_RETURN_IF_ERROR(varhandle_op->SetAttrShape( "shape", reinterpret_cast<const int64_t*>(dim_sizes.data()), shape.dims())); TF_RETURN_IF_ERROR(varhandle_op->SetAttrString("container", "", 0));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 11:28:19 UTC 2024 - 5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/ir/tfl_ops.cc
// 'shape' is the original shape with padding to match result shape. int64_t GetElementIndex(const std::vector<int64_t>& shape, const std::vector<int64_t>& current_index) { int64_t ind = 0; int64_t mul = 1; for (int i = shape.size() - 1; i >= 0; --i) { ind += (current_index[i] % shape[i]) * mul; mul *= shape[i]; } return ind; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 169.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/end2end/unroll_batch_matmul_disabled.pbtxt
node { name: "Placeholder" op: "Placeholder" attr { key: "dtype" value { type: DT_FLOAT } } attr { key: "shape" value { shape { dim { size: 2 } dim { size: 5 } dim { size: 3 } } } } } node {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 1.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td
// tail of the other operand and the intermediate result isn't used by other // ops. // $rhs is required to be the tail shape of $lhs, so after transformation the // shape of the binary op result is valid. For example, assume the shapes of // $input, $lhs and $rhs are [1600], [1,40,40] and [40x1]. After the // transformation, the shape of the binary op result is [40x1600], which
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 16 20:31:41 UTC 2024 - 66.4K bytes - Viewed (0)