- Sort Score
- Result 10 results
- Languages All
Results 51 - 60 of 74 for mat_mul (0.2 sec)
-
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic_v1_no_variable_lifting.py
def Test(): x = tf.constant([[1.0], [1.0], [1.0]]) y = tf.compat.v1.get_variable( name='y', shape=(1, 3), initializer=tf.random_normal_initializer(), trainable=True) r = tf.matmul(x, y) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r) return {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Sep 28 21:37:05 UTC 2021 - 1.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_xla_weight_only.mlir
// Use identity op to avoid the filter being constant-folded. %identity = "tf.Identity"(%filter) : (tensor<*xi8>) -> tensor<*xi8> %2 = "tf.Cast"(%identity) {Truncate = false} : (tensor<*xi8>) -> tensor<*xf32> %3 = "tf.MatMul"(%input, %2) { attr_map = "transpose_a:0,transpose_b:1" } : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32> func.return %3 : tensor<*xf32> } func.func private @internal_conv2d_fn(
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Mar 03 15:43:38 UTC 2023 - 7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/batchmatmul_to_einsum.mlir
// RUN: tf-opt %s -tf-batch-matmul-to-tf-einsum | FileCheck %s func.func @test_batch_matmul_to_einsum(%arg0: tensor<1x2x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x2x4xf32> { // CHECK-LABEL: test_batch_matmul_to_einsum // CHECK: "tf.Einsum"(%arg0, %arg1) <{equation = "...mk,...kn->...mn"}> : (tensor<1x2x3xf32>, tensor<3x4xf32>) -> tensor<1x2x4xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/import_restore_v1.py
def Test(): x = tf.constant([[1.0], [1.0], [1.0]]) y = tf.compat.v1.get_variable( name='y', shape=(1, 3), initializer=tf.random_normal_initializer(), trainable=True) r = tf.matmul(x, y) tensor_info_x = tf.compat.v1.saved_model.utils.build_tensor_info(x) tensor_info_r = tf.compat.v1.saved_model.utils.build_tensor_info(r) return {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Oct 31 08:49:35 UTC 2023 - 2.8K bytes - Viewed (1) -
tensorflow/compiler/mlir/tfr/README.md
(TODO) ## Authoring Op Composition in Python The composable TF provides a single API to define a new op with its composition at the same time. For example, the following code defines a new `FusedFullyConnected` op, which has `MatMul`, `Add`, and an `activation function` (specified by an op attribute) fused. ```python import tensorflow as tf @Composite( 'FusedFullyConnected', inputs=['input_: T', 'filter_: T', 'bias: T'],
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Mar 29 18:32:13 UTC 2022 - 6.2K bytes - Viewed (0) -
tensorflow/c/eager/gradient_checker_test.cc
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Apr 14 10:03:59 UTC 2023 - 6.5K bytes - Viewed (0) -
tensorflow/cc/framework/grad_op_registry.h
std::vector<Output>* grad_outputs); /// GradOpRegistry maintains a static registry of gradient functions. /// Gradient functions are indexed in the registry by the forward op name (i.e. /// "MatMul" -> MatMulGrad func). class GradOpRegistry { public: /// Registers 'func' as the gradient function for 'op'. /// Returns true if registration was successful, check fails otherwise.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Oct 05 15:33:58 UTC 2022 - 2.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir
// Run optimize-batch-matmul pass only and check the results. // RUN: tf-opt %s -tfl-optimize-batch-matmul | FileCheck %s // CHECK-LABEL: FuseTransposeFCRhsToBatchMatmul func.func @FuseTransposeFCRhsToBatchMatmul(%arg0: tensor<16x1024xf32>, %arg1: tensor<1024x128xf32>, %arg2: none) -> tensor<16x128xf32> { %cst = arith.constant dense<[1, 0]> : tensor<2xi32> %0 = "tfl.transpose"(%arg1, %cst) : (tensor<1024x128xf32>, tensor<2xi32>) -> tensor<128x1024xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu May 02 09:41:17 UTC 2024 - 9K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir
%input : tensor<*xf32>, %weight : tensor<*x!tf_type.qint8>, %weight_scale : tensor<*xf32>, %weight_zp : tensor<*xi32>) -> tensor<*xf32> attributes {tf_quant.quantized_ops = ["MatMul"]} { %out = "tf.UniformQuantizedDotHybrid"(%input, %weight, %weight_scale, %weight_zp) { Tlhs = "tfdtype$DT_FLOAT", Trhs = "tfdtype$DT_QINT8",
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Dec 01 12:06:54 UTC 2022 - 3.9K bytes - Viewed (0) -
tensorflow/cc/framework/gradient_checker_test.cc
#include "tensorflow/core/platform/test.h" #include "tensorflow/core/util/equal_graph_def.h" namespace tensorflow { namespace { using ops::Complex; using ops::Const; using ops::Div; using ops::MatMul; using ops::Placeholder; using ops::Real; using ops::Split; using ops::Square; using ops::Stack; using ops::Sub; using ops::Unstack; TEST(GradientCheckerTest, BasicFloat) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Aug 06 15:54:08 UTC 2018 - 6.7K bytes - Viewed (0)