Results 91 - 100 of 166 for mat_mul (0.15 sec)

  1. tensorflow/c/eager/gradient_checker_test.cc

                       absl::Span<AbstractTensorHandle* const> inputs,
                       absl::Span<AbstractTensorHandle*> outputs) {
      return ops::MatMul(ctx, inputs[0], inputs[1], &outputs[0],
                         /*transpose_a=*/false,
                         /*transpose_b=*/false, "MatMul");
    }
    
    Status MulModel(AbstractContext* ctx,
                    absl::Span<AbstractTensorHandle* const> inputs,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 14 10:03:59 UTC 2023
    - 6.5K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

    // CHECK-DAG: %[[CONST:.*]] = "tf.Const"() <{value = dense<-131072> : tensor<1x3xi32>}> : () -> tensor<1x3xi32>
    // CHECK: %[[MATMUL:.*]] = "tf.XlaDotV2"({{.*}}, %[[WEIGHT]])
    // CHECK-SAME: (tensor<1x1024xi8>, tensor<1024x3xi8>) -> tensor<1x3xi32>
    // CHECK: %[[SUB:.*]] = "tf.Sub"(%[[MATMUL]], %[[CONST]]) : (tensor<1x3xi32>, tensor<1x3xi32>) -> tensor<1x3xi32>
    }
    
    // -----
    
    module attributes {} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
    - Viewed (0)
  3. tensorflow/cc/framework/grad_op_registry.h

                               std::vector<Output>* grad_outputs);
    
    /// GradOpRegistry maintains a static registry of gradient functions.
    /// Gradient functions are indexed in the registry by the forward op name (i.e.
    /// "MatMul" -> MatMulGrad func).
    class GradOpRegistry {
     public:
      /// Registers 'func' as the gradient function for 'op'.
      /// Returns true if registration was successful, check fails otherwise.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 15:33:58 UTC 2022
    - 2.9K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/optimize_batch_matmul.mlir

    // Run optimize-batch-matmul pass only and check the results.
    // RUN: tf-opt %s -tfl-optimize-batch-matmul | FileCheck %s
    
    // CHECK-LABEL: FuseTransposeFCRhsToBatchMatmul
    func.func @FuseTransposeFCRhsToBatchMatmul(%arg0: tensor<16x1024xf32>, %arg1: tensor<1024x128xf32>, %arg2: none) -> tensor<16x128xf32> {
      %cst = arith.constant dense<[1, 0]> : tensor<2xi32>
      %0 = "tfl.transpose"(%arg1, %cst) : (tensor<1024x128xf32>, tensor<2xi32>) -> tensor<128x1024xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 9K bytes
    - Viewed (0)
  5. tensorflow/c/eager/c_api_cluster_test.cc

      TFE_TensorHandle* h0_task0 = TestMatrixTensorHandle(ctx);
    
      TFE_Op* matmul = MatMulOp(ctx, h0_task0, h0_task0);
      TFE_OpSetDevice(matmul, remote_device_name, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    
      TFE_TensorHandle* retvals[1];
      int num_retvals = 1;
      TFE_Execute(matmul, &retvals[0], &num_retvals, status);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 14 10:03:59 UTC 2023
    - 19.3K bytes
    - Viewed (0)
  6. tensorflow/cc/framework/gradient_checker_test.cc

    #include "tensorflow/core/platform/test.h"
    #include "tensorflow/core/util/equal_graph_def.h"
    
    namespace tensorflow {
    namespace {
    
    using ops::Complex;
    using ops::Const;
    using ops::Div;
    using ops::MatMul;
    using ops::Placeholder;
    using ops::Real;
    using ops::Split;
    using ops::Square;
    using ops::Stack;
    using ops::Sub;
    using ops::Unstack;
    
    TEST(GradientCheckerTest, BasicFloat) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Aug 06 15:54:08 UTC 2018
    - 6.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tfrt/tests/tfrt_fallback/batching_fallback.mlir

      %ch1 = tfrt.merge.chains %ch, %ch0 : !tfrt.chain, !tfrt.chain
    
      %ch2 = tfrt_fallback_async.createop(%ch1) key(0) device("/CPU:0") "tf.MatMul"() {T = i32} num_args(2)
    
      %ch3, %result = tfrt_fallback_async.executeop.seq(%ch2) key(0) cost(100) device("/CPU:0") "tf.MatMul"(%a, %b) {T = i32}  : 1
    
      %s = "tfrt_test.get_string"() { value = "Running @matmul_cpu" } : () -> !tfrt.string
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jul 18 22:58:56 UTC 2023
    - 8.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized_drq.mlir

                             %input : tensor<*xf32>, %weight : tensor<*x!tf_type.qint8>,
                             %weight_scale : tensor<*xf32>, %weight_zp : tensor<*xi32>) -> tensor<*xf32>
          attributes {tf_quant.quantized_ops = ["MatMul"]} {
    
        %out = "tf.UniformQuantizedDotHybrid"(%input, %weight,
                                    %weight_scale, %weight_zp) {
            Tlhs = "tfdtype$DT_FLOAT",
            Trhs = "tfdtype$DT_QINT8",
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Dec 01 12:06:54 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/shared_variable_v1.py

    
    def Test():
    
      x = tf.constant([[1.0], [1.0], [1.0]])
      y = tf.get_variable(
          name='y',
          shape=(1, 3),
          initializer=tf.random_normal_initializer(),
          trainable=True)
      r = tf.matmul(x, y)
    
      tensor_info_x = tf.saved_model.utils.build_tensor_info(x)
      tensor_info_r = tf.saved_model.utils.build_tensor_info(r)
    
      signature_def = tf.saved_model.signature_def_utils.build_signature_def(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Oct 31 08:49:35 UTC 2023
    - 2.7K bytes
    - Viewed (0)
  10. src/runtime/proc_test.go

    		done1 := make(chan struct{}, 1)
    		go matmult(done1, A, B, C, i0, i1, j0, mj, k0, k1, threshold)
    		matmult(nil, A, B, C, i0, i1, mj, j1, k0, k1, threshold)
    		<-done1
    	} else if dk >= threshold {
    		// divide in two by "k" axis
    		// deliberately not parallel because of data races
    		mk := k0 + dk/2
    		matmult(nil, A, B, C, i0, i1, j0, j1, k0, mk, threshold)
    		matmult(nil, A, B, C, i0, i1, j0, j1, mk, k1, threshold)
    	} else {
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Wed Jun 14 00:03:57 UTC 2023
    - 25.8K bytes
    - Viewed (0)