- Sort Score
- Result 10 results
- Languages All
Results 141 - 150 of 1,694 for shape (0.09 sec)
-
tensorflow/compiler/mlir/lite/stablehlo/transforms/fuse_convolution_pass.cc
.getOutputDimensions() .getDefiningOp<shape::ShapeOfOp>(); // Check if the shape come from the original conv op. if (!shape_of_op || shape_of_op.getArg().getDefiningOp<mhlo::ConvolutionOp>() != conv_op) { return failure(); } Value new_shape_of = rewriter.create<shape::ShapeOfOp>( mul_op.getLoc(), shape_of_op.getType(), new_conv);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 22:21:19 UTC 2024 - 8.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/math.mlir
// CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { // CHECK-NEXT: shape: [ 4 ], // CHECK-NEXT: buffer: 1, // CHECK-NEXT: name: "arg0", // CHECK-NEXT: quantization: { // CHECK-EMPTY: // CHECK-NEXT: }, // CHECK-NEXT: has_rank: true // CHECK-NEXT: }, { // CHECK-NEXT: shape: [ 4 ], // CHECK-NEXT: buffer: 2, // CHECK-NEXT: name: "Const",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 5.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/examples/mnist/mnist_train.py
# output shape: [-1, 7*7*64] reshape = tf.reshape(max_pool2, [-1, flatten_size]) # output shape: [-1, 1024] fc1 = gen_mnist_ops.new_fully_connected(reshape, self.weights['f3'], self.biases['b3'], 'RELU') # output shape: [-1, 10] return gen_mnist_ops.new_fully_connected(fc1, self.weights['f4'], self.biases['b4'])
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 20 03:05:18 UTC 2021 - 6.5K bytes - Viewed (0) -
tensorflow/compiler/jit/xla_host_recv_device_context.h
// xla::Shape shape(xla::F32, {2, 2}, {}, {}) // tsl::AsyncValueRef<std::unique_ptr<se::Event>> done_event = // tsl::MakeConstructedAsyncValueRef<std::unique_ptr<se::Event>>(stream.parent()); // done_event->Init(); // Tensor dest_cpu_tensor; // // XlaHostRecvDeviceContext device_context(&stream, gpu_dst, // shape, done_event); // device_context.CopyDeviceTensorToCPUSync(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 17 22:46:36 UTC 2024 - 3.9K bytes - Viewed (0) -
tensorflow/cc/ops/const_op.h
} template <typename T> Output Const(const Scope& scope, const T& v, const TensorShape shape) { return Const(scope, Input::Initializer(v, shape)); } template <typename T> Output Const(const Scope& scope, const std::initializer_list<T>& v, const TensorShape shape) { return Const(scope, Input::Initializer(v, shape)); } std::vector<NodeBuilder::NodeOut> AsNodeOutList(const Scope& scope,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Sep 17 09:17:01 UTC 2020 - 2.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/utils/convert_type.cc
llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dims()); for (const auto& d : input_shape) { shape->push_back(d.size == kTFDynamicSize ? ShapedType::kDynamic : d.size); } } Status ConvertToMlirShape(const TensorShapeProto& input_shape, llvm::SmallVectorImpl<int64_t>* shape) { shape->reserve(input_shape.dim_size());
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 26 09:37:10 UTC 2024 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/promote_var_handles_to_args.mlir
%0 = "tf.VarHandleOp"() {container = "", shape = "tfshape$", shared_name = "x"} : () -> tensor<!tf_type.resource<tensor<f32>>> func.return } // CHECK-LABEL: func @some_args // CHECK-SAME: (%arg0: tensor<i1>, %arg1: tensor<!tf_type.resource<tensor<f32>>> {tf.resource_name = "x"}) // CHECK-NOT: "tf.VarHandleOp" func.func @some_args(%arg0: tensor<i1>) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Mar 28 12:06:33 UTC 2022 - 2.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/custom_op_with_tflite_op.mlir
// CHECK-NEXT: subgraphs: [ { // CHECK-NEXT: tensors: [ { // CHECK-NEXT: shape: [ 4 ], // CHECK-NEXT: buffer: 1, // CHECK-NEXT: name: "arg0", // CHECK-NEXT: quantization: { // CHECK-EMPTY: // CHECK-NEXT: }, // CHECK-NEXT: has_rank: true // CHECK-NEXT: }, { // CHECK-NEXT: shape: [ 4 ], // CHECK-NEXT: buffer: 2, // CHECK-NEXT: name: "Const",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 14 16:41:28 UTC 2022 - 4.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc
} // Helper method that given 'shape' and 'current_index' representing // index in broadcasted tensor, get the index in the flat original tensor. // 'shape' is computed from the original shape and the broadcast dimensions to // match result shape. int64_t GetElementIndex(llvm::SmallVectorImpl<int64_t> &shape, llvm::SmallVectorImpl<int64_t> &current_index) { int64_t ind = 0;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/c/experimental/saved_model/core/ops/variable_ops.cc
TF_RETURN_IF_ERROR(varhandle_op->SetAttrType("dtype", dtype)); // Note that if shape is unknown rank, shape.dim_sizes() will be empty, and // shape.dims() will be -1. absl::InlinedVector<int64_t, 4UL> dim_sizes = shape.dim_sizes(); TF_RETURN_IF_ERROR(varhandle_op->SetAttrShape( "shape", reinterpret_cast<const int64_t*>(dim_sizes.data()), shape.dims())); TF_RETURN_IF_ERROR(varhandle_op->SetAttrString("container", "", 0));
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 23 11:28:19 UTC 2024 - 5K bytes - Viewed (0)