- Sort by: Score
- Results per page: 10
- Languages: All
Results 31 - 38 of 38 for ShapeN (0.39 sec)
-
tensorflow/c/eager/c_api_test.cc
// .device of shape is GPU since the op is executed on GPU device_name = TFE_TensorHandleDeviceName(retvals[0], status.get()); ASSERT_EQ(TF_OK, TF_GetCode(status.get())) << TF_Message(status.get()); ASSERT_TRUE(absl::StrContains(device_name, "GPU:0")) << device_name; // .backing_device of shape is CPU since the tensor is backed by CPU backing_device_name =
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 03 20:50:20 UTC 2023 - 94.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
def test_ptq_selective_denylist(self): """Tests that the op is not quantized when no quantization is enabled.""" rng = np.random.default_rng(1230) random_tensor_gen_fn = lambda shape: rng.uniform( low=-1.0, high=1.0, size=shape ).astype(np.float32) class TwoMatmulModel(module.Module): """A model with two matmul ops.""" @def_function.function
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/c/c_api_function_test.cc
TF_DeleteStatus); TF_Tensor* tensor_shape = Int32Tensor({37, 1}); TF_Operation* shape = Const(tensor_shape, func_graph.get(), s.get(), "shape"); TF_Operation* random = RandomUniform(shape, TF_FLOAT, func_graph.get(), s.get()); TF_Output outputs[] = {{random, 0}}; *func = TF_GraphToFunction(func_graph.get(), name,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Jul 20 22:08:54 UTC 2023 - 63.6K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
// If weight_shape[2] != 1, it means weight shape was already restored. if (weight_shape[2] != 1) return failure(); // Weight was reshaped into [H, W, 1, InxMul]. // Since we know in_channels from input_shape, we can derive multiplier. int64_t in_channels = input_shape[3]; // If in_channels is 1, there is no need to restore weight shape. if (in_channels == 1) return failure();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 54.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
builder.setInsertionPoint(op); if (!const_true) const_true = builder.create<TF::ConstOp>( op.getLoc(), DenseIntElementsAttr::get( RankedTensorType::get(/*shape=*/{}, builder.getI1Type()), true)); op.getIsInitialized().replaceAllUsesWith(const_true); op.erase(); } } // Performs store-load forwarding. This effectively removes
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/compose_uniform_quantized_type_pass.cc
output_zero_point_value); Value conv_output_value = op.getResult(); auto output_uniform_quantized_tensor_type = RankedTensorType::getChecked( rewriter.getUnknownLoc(), /*shape=*/ mlir::cast<TensorType>(conv_output_value.getType()).getShape(), output_uniform_quantized_type); SmallVector<Type> new_conv_output_types = { output_uniform_quantized_tensor_type};
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 64.6K bytes - Viewed (0) -
src/runtime/traceback.go
// It returns three pieces so it doesn't need an allocation for string // concatenation. func funcNamePiecesForPrint(name string) (string, string, string) { // Replace the shape name in generic function with "...". i := bytealg.IndexByteString(name, '[') if i < 0 { return name, "", "" } j := len(name) - 1 for name[j] != ']' { j-- } if j <= i {
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Wed May 29 16:25:21 UTC 2024 - 55.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/resource_op_lifting.mlir
func.return %1 : tensor<*xi32> } // ----- // Tests that resource ops with both load and store are hoisted // but input to load and output from store have mixed defined/undefined shapes. // CHECK-LABEL: func @same_resource_load_and_store_cast func.func @same_resource_load_and_store_cast() -> tensor<1xi32> { // CHECK: %[[RES_HANDLE:[0-9]*]] = "tf.VarHandleOp"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 74K bytes - Viewed (0)