- Sort Score
- Result 10 results
- Languages All
Results 91 - 100 of 163 for matmult (0.2 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
// Use identity op to avoid the filter being constant-folded. %identity = "tf.Identity"(%filter) : (tensor<*xi8>) -> tensor<*xi8> %2 = "tf.Cast"(%identity) {Truncate = false} : (tensor<*xi8>) -> tensor<*xf32> %3 = "tf.MatMul"(%input, %2) { attr_map = "transpose_a:0,transpose_b:1" } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %3 : tensor<*xf32> } func.func private @internal_conv2d_fn(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 7K bytes - Viewed (0) -
tensorflow/c/eager/c_api_experimental_test.cc
TFE_TensorHandle* m = TestMatrixTensorHandle(ctx); TFE_Op* matmul = MatMulOp(ctx, m, m); TFE_TensorHandle* retvals[2] = {nullptr, nullptr}; int num_retvals = 2; TFE_Execute(matmul, &retvals[0], &num_retvals, status); EXPECT_EQ(1, num_retvals); EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status); TFE_DeleteOp(matmul); TFE_DeleteTensorHandle(m);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Aug 03 03:14:26 UTC 2023 - 31.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/batchmatmul_to_einsum.mlir
// RUN: tf-opt %s -tf-batch-matmul-to-tf-einsum | FileCheck %s func.func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x2x4xf32> { // CHECK-LABEL: test_batch_matmul_to_einsum // CHECK: "tf.Einsum"(%arg0, %arg1) <{equation = "...mk,...kn->...mn"}> : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/import_restore_v1.py
def Test(): x = tf.constant([[1.0], [1.0], [1.0]]) y = tf.compat.v1.get_variable( name='y', shape=(1, 3), initializer=tf.random_normal_initializer(), trainable=True) r = tf.matmul(x, y) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r) return {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:49:35 UTC 2023 - 2.8K bytes - Viewed (1) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_tf_drq.mlir
%zp_fp32 = "tf.Cast"(%zp_from_max) : (tensor<1xf64>) -> tensor<1xf32> %zp = "tf.Cast"(%zp_fp32) : (tensor<1xf32>) -> tensor<1xi32> func.return %scale, %zp : tensor<1xf32>, tensor<1xi32> } // Matmul with int32 accumulation func.func private @internal_matmul_fn( %input : tensor<*xi8>, %filter : tensor<*xi8>, %input_scale : tensor<*xf32>, %input_zp : tensor<*xi32>,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 12.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h
std::unique_ptr<OperationPass<mlir::func::FuncOp>> CreateOptimizePass(); // Creates an instance of the ReplaceCastHacksWithTFXLAOpsPass, which will // replace mixed-type convolution and matmul cast hacks by XLA Conv2DOp and // MatmulOp. std::unique_ptr<OperationPass<func::FuncOp>> CreateReplaceCastHacksWithTFXLAOpsPass(); // Creates a pass that moves & merges initializer function's ops into the @main
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 12.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/README.md
(TODO) ## Authoring Op Composition in Python The composable TF provides a single API to define a new op with its composition at the same time. For example, the following code defines a new `FusedFullyConnected` op, which has `MatMul`, `Add` and some `activation function` (specified by an op attribute) fused. ```python import tensorflow as tf @Composite( 'FusedFullyConnected', inputs=['input_: T', 'filter_: T', 'bias: T'],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/cc/framework/scope.h
/// int idx = 3; /// auto b = Variable(linear.WithOpName("b_", idx), /// {2}, DT_FLOAT); /// auto x = Const(linear, {...}); // name: "linear/Const" /// auto m = MatMul(linear, x, W); // name: "linear/MatMul" /// auto r = BiasAdd(linear, m, b); // name: "linear/BiasAdd" /// /// Scope lifetime: /// /// A new scope is created by calling Scope::NewRootScope. This creates some
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 13 09:08:33 UTC 2024 - 10.5K bytes - Viewed (0) -
tensorflow/c/eager/gradient_checker_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 14 10:03:59 UTC 2023 - 6.5K bytes - Viewed (0) -
tensorflow/cc/framework/grad_op_registry.h
std::vector<Output>* grad_outputs); /// GradOpRegistry maintains a static registry of gradient functions. /// Gradient functions are indexed in the registry by the forward op name (i.e. /// "MatMul" -> MatMulGrad func). class GradOpRegistry { public: /// Registers 'func' as the gradient function for 'op'. /// Returns true if registration was successful, check fails otherwise.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 15:33:58 UTC 2022 - 2.9K bytes - Viewed (0)