- Sort Score
- Results per page: 10
- Languages All
Results 81 - 90 of 166 for mat_mul (0.13 sec)
-
tensorflow/c/experimental/ops/gen/cpp/golden/testing_ops.h.golden
namespace tensorflow { namespace ops { // Status Neg(AbstractContext* ctx, AbstractTensorHandle* const x, AbstractTensorHandle** y, const char* name = nullptr, const char* raw_device_name = nullptr); //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Nov 16 19:04:03 UTC 2023 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic_v1_no_variable_lifting.py
def Test(): x = tf.constant([[1.0], [1.0], [1.0]]) y = tf.compat.v1.get_variable( name='y', shape=(1, 3), initializer=tf.random_normal_initializer(), trainable=True) r = tf.matmul(x, y) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r) return {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc
auto matmul = rewriter.create<TFL::BatchMatMulOp>( loc, RankedTensorType::get(matmul_shape, result_type.getElementType()), lhs_flattend, rhs_flattend, /*adj_x*/ false_attr, /*adj_y*/ false_attr, /*asym_quant_input*/ false_attr); if (result_type.hasStaticShape()) { auto reshaped = rewriter.create<mhlo::ReshapeOp>(loc, result_type, matmul.getResult()); return reshaped.getResult();
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 19.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
// Use identity op to avoid the filter being constant-folded. %identity = "tf.Identity"(%filter) : (tensor<*xi8>) -> tensor<*xi8> %2 = "tf.Cast"(%identity) {Truncate = false} : (tensor<*xi8>) -> tensor<*xf32> %3 = "tf.MatMul"(%input, %2) { attr_map = "transpose_a:0,transpose_b:1" } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %3 : tensor<*xf32> } func.func private @internal_conv2d_fn(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/batchmatmul_to_einsum.mlir
// RUN: tf-opt %s -tf-batch-matmul-to-tf-einsum | FileCheck %s func.func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x2x4xf32> { // CHECK-LABEL: test_batch_matmul_to_einsum // CHECK: "tf.Einsum"(%arg0, %arg1) <{equation = "...mk,...kn->...mn"}> : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/import_restore_v1.py
def Test(): x = tf.constant([[1.0], [1.0], [1.0]]) y = tf.compat.v1.get_variable( name='y', shape=(1, 3), initializer=tf.random_normal_initializer(), trainable=True) r = tf.matmul(x, y) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r) return {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:49:35 UTC 2023 - 2.8K bytes - Viewed (1) -
tensorflow/c/eager/c_api_distributed_test.cc
ASSERT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TFE_Op* matmul = MatMulOp(ctx, h0_task1, h1_task1); TFE_OpSetDevice(matmul, remote_device_name, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TFE_TensorHandle* retvals[1]; int num_retvals = 1; TFE_Execute(matmul, &retvals[0], &num_retvals, status); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 15 09:49:45 UTC 2024 - 23.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/README.md
(TODO) ## Authoring Op Composition in Python The composable TF provides a single API to define a new op with its composition at the same time. For example, the following code defines a new `FusedFullyConnected` op, which has `MatMul`, `Add` and some `activation function` (specified by an op attribute) fused. ```python import tensorflow as tf @Composite( 'FusedFullyConnected', inputs=['input_: T', 'filter_: T', 'bias: T'],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
%zp_fp32 = "tf.Cast"(%zp_from_max) : (tensor<1xf64>) -> tensor<1xf32> %zp = "tf.Cast"(%zp_fp32) : (tensor<1xf32>) -> tensor<1xi32> func.return %scale, %zp : tensor<1xf32>, tensor<1xi32> } // Matmul with int32 accumulation func.func private @internal_matmul_fn( %input : tensor<*xi8>, %filter : tensor<*xi8>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
tensorflow/cc/framework/scope.h
/// int idx = 3; /// auto b = Variable(linear.WithOpName("b_", idx), /// {2}, DT_FLOAT); /// auto x = Const(linear, {...}); // name: "linear/Const" /// auto m = MatMul(linear, x, W); // name: "linear/MatMul" /// auto r = BiasAdd(linear, m, b); // name: "linear/BiasAdd" /// /// Scope lifetime: /// /// A new scope is created by calling Scope::NewRootScope. This creates some
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 09:08:33 UTC 2024 - 10.5K bytes - Viewed (0)