Results 71 - 80 of 163 for matmult (0.08 sec)

  1. tensorflow/compiler/mlir/tensorflow/tests/fused_kernel_matcher.mlir

      %3 = "tf.Identity"(%2) : (tensor<*xf32>) -> tensor<*xf32>
      func.return %3 : tensor<*xf32>
    }
    
    //===----------------------------------------------------------------------===//
    // MatMul + BiasAdd + <Activation> fusions.
    //===----------------------------------------------------------------------===//
    
    // CHECK-LABEL: matmulBiasAdd
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 13.2K bytes
    - Viewed (0)
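The header comment in result 1 names the pattern this test exercises: a MatMul feeding a BiasAdd and then an activation, which the fusion pass collapses into a single fused kernel. A minimal Python sketch of the unfused chain such a matcher looks for (function and tensor names here are illustrative, not taken from fused_kernel_matcher.mlir):

    import tensorflow as tf

    @tf.function
    def matmul_bias_relu(x, w, b):
      # Three separate ops; a fusion pass can rewrite this chain
      # (tf.MatMul -> tf.BiasAdd -> <Activation>) into one fused kernel.
      y = tf.linalg.matmul(x, w)   # tf.MatMul
      y = tf.nn.bias_add(y, b)     # tf.BiasAdd
      return tf.nn.relu(y)         # <Activation>, Relu in this sketch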
  2. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

        ).astype(np.float32)
    
        class TwoMatmulModel(module.Module):
          """A model with two matmul ops."""
    
          @def_function.function
          def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]:
            """Performs a matrix multiplication.
    
            Args:
              input_tensor: Input tensor to matmul with the filter.
    
            Returns:
              A 'output' -> output tensor mapping
            """
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
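Result 2's snippet shows the docstring of a test model built around two chained matmul ops. A rough public-API sketch of such a model (the filter shapes, variable names, and use of tf.Module in place of the internal module.Module are assumptions, not copied from quantize_model_test.py):

    import tensorflow as tf

    class TwoMatmulModel(tf.Module):
      """A model with two matmul ops (illustrative sketch)."""

      def __init__(self):
        self.filter1 = tf.Variable(tf.random.uniform([4, 4]))
        self.filter2 = tf.Variable(tf.random.uniform([4, 4]))

      @tf.function(input_signature=[tf.TensorSpec([1, 4], tf.float32)])
      def matmul(self, input_tensor):
        """Performs two matrix multiplications with the filters."""
        out = tf.linalg.matmul(input_tensor, self.filter1)
        out = tf.linalg.matmul(out, self.filter2)
        # Returns an 'output' -> output tensor mapping, as in the docstring.
        return {'output': out}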
  3. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/device_conversion.mlir

        %arg1: tensor<1x3xf32> {tf_saved_model.index_path = [0]})
          -> (tensor<3x3xf32> {tf_saved_model.index_path = []}) {
      // CHECK: {{%.*}} = corert.get_op_handler %arg0 "/device:GPU:0"
      %2 = "tf.MatMul"(%arg0, %arg1) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "/device:GPU:0", transpose_a = false, transpose_b = false} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 645 bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tensorflow/tests/compile_mlir_util/shape-inference.mlir

    module attributes {tf.versions = {producer = 179 : i32}} {
      func.func @main(%arg0: tensor<*xf32>, %arg1: tensor<?x19xf32>) -> tensor<?x19xf32> {
        %0 = "tf.MatMul"(%arg0, %arg1) {T = "tfdtype$DT_FLOAT", transpose_a = false, transpose_b = false} : (tensor<*xf32>, tensor<?x19xf32>) -> tensor<?x19xf32>
        func.return %0 : tensor<?x19xf32>
      }
    }
    
    // CHECK-LABEL: HloModule main
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 23 18:56:13 UTC 2022
    - 969 bytes
    - Viewed (0)
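Result 4 is a shape-inference test: the first MatMul operand is unranked (tensor<*xf32>), yet the result type can still be refined to ?x19, because a MatMul output takes its column count from the right-hand operand. The same relationship sketched with public TF APIs (shapes here are illustrative):

    import tensorflow as tf

    @tf.function(input_signature=[
        tf.TensorSpec(None, tf.float32),        # unranked, like tensor<*xf32>
        tf.TensorSpec([None, 19], tf.float32),  # like tensor<?x19xf32>
    ])
    def matmul_fn(lhs, rhs):
      # Shape inference can still conclude the result has 19 columns,
      # mirroring the tensor<?x19xf32> result type in the test above.
      return tf.linalg.matmul(lhs, rhs)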
  5. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/attributes.mlir

      // CHECK: {{%.*}} = tfrt_fallback_async.executeop {{.*}} device("/device:CPU:0") "tf.MatMul"
      // CHECK-SAME: {T = f32, transpose_a = false, transpose_b = false}
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 4.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.h

    namespace mlir {
    namespace TF {
    
    // Populate patterns to unroll tf.BatchMatMulV2 op into a sequence of TF ops.
    // Since TFLite does not support BatchMatMul operation, it unrolls a BatchMatMul
    // op into tf.Reshape, tf.Slice, tf.MatMul, tf.Pack, and tf.Reshape ops.
    void PopulateUnrollTfBatchMatMul(MLIRContext* context,
                                     RewritePatternSet& patterns);
    
    }  // namespace TF
    }  // namespace mlir
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jan 27 15:05:02 UTC 2022
    - 1.7K bytes
    - Viewed (0)
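The header in result 6 explains the unrolling itself: since TFLite has no BatchMatMul operation, the op is rewritten into per-batch Reshape, Slice, MatMul, and Pack ops. A rough Python illustration of the same idea using public TF ops (the real pass operates on MLIR rewrite patterns; the function name below is an assumption):

    import tensorflow as tf

    def unrolled_batch_matmul(a, b):
      """Illustrative unroll of a BatchMatMul into Slice/Reshape/MatMul/Pack."""
      results = []
      for i in range(a.shape[0]):
        # Slice out one batch element and drop the leading batch dimension.
        lhs = tf.reshape(tf.slice(a, [i, 0, 0], [1, -1, -1]), a.shape.as_list()[1:])
        rhs = tf.reshape(tf.slice(b, [i, 0, 0], [1, -1, -1]), b.shape.as_list()[1:])
        results.append(tf.linalg.matmul(lhs, rhs))  # plain 2-D tf.MatMul
      # Re-assemble the per-batch results along a new leading axis (tf.Pack).
      return tf.stack(results)

    # Sanity check: shape matches what the batched op would produce.
    a = tf.random.uniform([4, 2, 3])
    b = tf.random.uniform([4, 3, 5])
    assert unrolled_batch_matmul(a, b).shape == (4, 2, 5)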
  7. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/basic.mlir

      // CHECK-NEXT: [[ch1:%.*]], [[var:%.*]] = tfrt_fallback_async.executeop.seq([[in_chain]]) {{.*}} "tf.ReadVariableOp"([[arg1]])
      // CHECK-NEXT: [[r0:%.*]] = tfrt_fallback_async.executeop {{.*}} "tf.MatMul"([[arg0]], [[var]])
      %2 = "tf.MatMul"(%arg0, %1) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "/device:CPU:0", transpose_a = false, transpose_b = false} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 3.9K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/tests/keras_imagenet_main_graph_mode.golden_summary

     BiasAdd 1
     BiasAddGrad 1
     Cast 3
     Const 357
     Conv2D 53
     Conv2DBackpropFilter 53
     Conv2DBackpropInput 52
     DivNoNan 1
     Equal 1
     FusedBatchNorm 53
     FusedBatchNormGrad 53
     Identity 2
     MatMul 3
     MaxPool 1
     MaxPoolGrad 1
     Mean 1
     Mul 164
     Pad 1
     ReadVariableOp 646
     Relu 49
     ReluGrad 49
     Reshape 2
     ResourceApplyKerasMomentum 161
     ShapeN 50
     Softmax 1
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 740 bytes
    - Viewed (0)
  9. tensorflow/c/eager/c_api_remote_test_util.h

    ==============================================================================*/
    #ifndef TENSORFLOW_C_EAGER_C_API_REMOTE_TEST_UTIL_H_
    #define TENSORFLOW_C_EAGER_C_API_REMOTE_TEST_UTIL_H_
    
    // Run a function containing a MatMul op and check its output.
    // If heavy_load_on_streaming_rpc is true, send some rpc requests before the one
    // which creates a remote input, to simulate a scenario that the remote input
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Dec 11 22:56:03 UTC 2020
    - 1.4K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/device_assignment.mlir

    func.func @device_test(%arg0: tensor<3x1xf32>) -> (tensor<3x3xf32>) {
    
      // CHECK: device = "gpu"
      %0 = "tf.Const"() {value = dense<[[1.0, 2.0, 3.0]]> : tensor<1x3xf32>} : () -> tensor<1x3xf32>
      // CHECK: device = "gpu"
      %1 = "tf.MatMul"(%arg0, %0) {T = f32, _output_shapes = ["tfshape$dim { size: 3 } dim { size: 3 }"], device = "", transpose_a = false, transpose_b = false} : (tensor<3x1xf32>, tensor<1x3xf32>) -> tensor<3x3xf32>
      // CHECK: device = "cpu"
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:47:26 UTC 2022
    - 924 bytes
    - Viewed (0)