- Sort by: Score
- Results per page: 10
- Language filter: All
Results 1 - 8 of 8 for new_shape (0.17 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_ops_layout_helper.cc
ArrayRef<int64_t> shape = ranked_type.getShape(); assert(permutation.size() == shape.size()); SmallVector<int64_t, 4> new_shape(permutation.size()); for (size_t i = 0; i < permutation.size(); ++i) new_shape[i] = shape[permutation[i]]; return RankedTensorType::get(new_shape, ranked_type.getElementType()); } return type; } bool AreCancellablePermutations(DenseIntElementsAttr perm0,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/set_tpu_infeed_layout.cc
XLA_Shape old_shape_c = {}; XLA_Shape new_shape_c = {}; TfTpu_ExecutorApiFn *executor = stream_executor::tpu::ExecutorApiFn(); if (!stream_executor::tpu::IsInitialized(executor)) { return failure(); } ApiConverter::ToC(old_shape, &old_shape_c); executor->TpuTransferManager_GetInfeedLayoutFn(&old_shape_c, &new_shape_c); xla::Shape new_shape = ApiConverter::FromC(&new_shape_c); ApiConverter::Destroy(&old_shape_c);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/legacy_reshape.json
"outputs": [1], "operators": [ { "inputs": [ 0 ], "outputs": [ 1 ], "builtin_options_type": "ReshapeOptions", "builtin_options": { "new_shape": [ 2, 2 ] } } ] } ], "buffers": []
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 986 bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_to_hlo_pipeline/sccp-post-shape-inference.mlir
%2 = "tf.PartitionedCall"(%1) {config = "", config_proto = "", executor_type = "", f = @get_shape} : (tensor<?x?xf32>) -> (tensor<?xi64>) // CHECK: return %[[RESULT]] func.return %2 : tensor<?xi64> } // CHECK-LABEL: func @get_shape func.func @get_shape(%arg0 : tensor<*xi64>) -> tensor<?xi64> { %0 = "tf.Shape"(%arg0) : (tensor<*xi64>) -> tensor<?xi64>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jul 25 02:54:34 UTC 2023 - 1020 bytes - Viewed (0) -
src/vendor/golang.org/x/crypto/sha3/shake.go
} // cSHAKE specific context type cshakeState struct { *state // SHA-3 state context and Read/Write operations // initBlock is the cSHAKE specific initialization set of bytes. It is initialized // by newCShake function and stores concatenation of N followed by S, encoded // by the method specified in 3.3 of [1]. // It is stored here in order for Reset() to be able to put context into // initial state. initBlock []byte }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Tue Jun 04 16:19:04 UTC 2024 - 5.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops.cc
} // Save the new state value to the resource. pack_args.push_back(key); Value new_state = rewriter.create<PackOp>(loc, res_type, pack_args); rewriter.create<AssignVariableOp>(loc, rng_op.getResource(), new_state); // Pad the original state as necessary to fill the output shape. int pad = tensorflow::RNG_MAX_COUNTER_SIZE - counter_size;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 03 12:35:38 UTC 2022 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc
TF::ConstOp new_reshape_shape = GetI64ConstantTensor( rewriter, ArrayRef<int64_t>(new_reshape_dims), op.getLoc()); auto new_reshape_type = RankedTensorType::get(new_reshape_dims, el_ty); ReshapeOp new_reshape = rewriter.create<ReshapeOp>(new_reshape_shape.getLoc(), new_reshape_type, op.getInput(), new_reshape_shape); TF::ConstOp new_broadcast_shape =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 8.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/g3doc/space_to_depth.md
kernel_initializer=tf.variance_scaling_initializer(), data_format=data_format) # Use the image size without space-to-depth transform as the input of conv0. batch_size, h, w, channel = inputs.get_shape().as_list() conv0.build([ batch_size, h * space_to_depth_block_size, w * space_to_depth_block_size, channel // (space_to_depth_block_size**2) ]) kernel = conv0.weights[0]
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Oct 24 02:51:43 UTC 2020 - 8.3K bytes - Viewed (0)