Results 71 - 80 of 229 for transposes (0.17 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/hardwares/gpu_hardware.cc

    // tfl.relu / tfl.relu6 / tfl.rsqrt / tfl.sin / tfl.slice / tfl.softmax /
    // tfl.space_to_depth / tfl.sqrt / tfl.square / tfl.squared_difference /
    // tfl.strided_slice / tfl.tanh / tfl.transpose / tfl.transpose_conv
    class GpuBasicSupportedOpNoCost : public TargetHardwareOperation {
      double GetOpCost(mlir::Operation* op) const override { return 0; }
    
      bool IsOpSupported(mlir::Operation* op) const override {
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 7.8K bytes
  2. tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc

    // is then used to set attributes in the quantized functions in the
    // QuantizeCompositeFunctionsPass.
    // For example, for tf.MatMul with `attributes` = {{"transpose_a", false},
    // {"transpose_b", false}}, the generated attr_map is
    // "0:transpose_a,1:transpose_b", where 0 and 1 are the respective attribute
    // identifiers.
    // This function returns success if all attributes could be found.
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 21.8K bytes
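
    The comment above fully specifies the attr_map format: "<index>:<attribute name>" entries joined by commas. As a hedged illustration only (not the actual C++ implementation in lift_as_function_call.cc), the string for the tf.MatMul example can be reproduced like this; the attribute names and their order come straight from the comment:

      # Sketch only: rebuilds the attr_map string described in the comment,
      # "<index>:<attribute name>" entries joined by commas.
      attributes = ["transpose_a", "transpose_b"]  # from the tf.MatMul example
      attr_map = ",".join(f"{i}:{name}" for i, name in enumerate(attributes))
      print(attr_map)  # -> 0:transpose_a,1:transpose_b
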
  3. tensorflow/compiler/mlir/tensorflow/tests/einsum.mlir

      // CHECK-DAG: %[[cst_0:.*]] = arith.constant dense<[0, 2, 1]> : tensor<3xi32>
      // CHECK: %[[v0:.*]] = "tf.Transpose"(%arg0, %[[cst]]) : (tensor<2x5x7xf32>, tensor<3xi32>) -> tensor<5x7x2xf32>
      // CHECK: %[[v1:.*]] = "tf.Transpose"(%arg1, %[[cst_0]]) : (tensor<5x3x2xf32>, tensor<3xi32>) -> tensor<5x2x3xf32>
    - Last Modified: Fri Jan 05 18:35:42 UTC 2024
    - 25.9K bytes
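
    The two CHECK lines pin down concrete shapes that are easy to verify. The first permutation constant (%[[cst]]) is truncated out of the snippet, so [1, 2, 0] below is inferred as the permutation that maps 2x5x7 to 5x7x2; the second, [0, 2, 1], is taken from the %[[cst_0]] line. A quick NumPy sanity check, standing in for tf.Transpose:

      # Shape check for the two tf.Transpose CHECK lines above. The first
      # permutation is inferred from the shapes; the second comes from %[[cst_0]].
      import numpy as np

      arg0 = np.zeros((2, 5, 7), dtype=np.float32)
      arg1 = np.zeros((5, 3, 2), dtype=np.float32)

      print(np.transpose(arg0, (1, 2, 0)).shape)  # (5, 7, 2), matches %[[v0]]
      print(np.transpose(arg1, (0, 2, 1)).shape)  # (5, 2, 3), matches %[[v1]]
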
  4. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nchw.mlir

      // Convert result back: NHWC -> NCHW
      %3 = "tf.Const"() {value = dense<[0, 3, 1, 2]> : tensor<4xi32>} : () -> tensor<4xi32>
      %4 = "tf.Transpose"(%2, %3) : (tensor<1x32x32x8xf32>, tensor<4xi32>) -> tensor<1x8x32x32xf32>
    
      // Check that Conv2D computed in NCHW format, and all redundant transpose
      // operations removed from the function.
    
      // CHECK: %[[CONV:[0-9]*]] = "tf.Conv2D"(%arg0, %arg1)
      // CHECK-SAME: data_format = "NCHW"
    - Last Modified: Thu Mar 24 05:47:26 UTC 2022
    - 1.3K bytes
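
    The dense<[0, 3, 1, 2]> constant is the standard NHWC-to-NCHW shuffle. A small NumPy sketch of both directions (not part of the test, just the layout arithmetic):

      # NHWC <-> NCHW permutations used by the layout-optimization test above.
      import numpy as np

      x_nhwc = np.zeros((1, 32, 32, 8), dtype=np.float32)  # N, H, W, C
      x_nchw = np.transpose(x_nhwc, (0, 3, 1, 2))  # the dense<[0, 3, 1, 2]> const
      print(x_nchw.shape)                          # (1, 8, 32, 32)

      # The inverse permutation, NCHW -> NHWC, is [0, 2, 3, 1].
      print(np.transpose(x_nchw, (0, 2, 3, 1)).shape)  # (1, 32, 32, 8)
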
  5. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla_selective_quantization.mlir

        %cst_0 = "tf.Const"() {value = dense<[-1, 10]> : tensor<2xi32>} : () -> tensor<2xi32>
        %1 = "tf.MatMul"(%arg0, %arg1) {
          transpose_a = false, transpose_b = false
        } : (tensor<1x10xf32>, tensor<10x10xf32>) -> tensor<1x10xf32>  loc(fused["MatMul:", "test_opt_out"])
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.8K bytes
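
    With transpose_a = transpose_b = false this is a plain matrix product, and the shapes in the snippet follow directly. A NumPy stand-in (illustration only):

      # Shape check for the tf.MatMul above (transpose_a = transpose_b = false).
      import numpy as np

      a = np.ones((1, 10), dtype=np.float32)
      b = np.ones((10, 10), dtype=np.float32)
      print(np.matmul(a, b).shape)  # (1, 10), the tensor<1x10xf32> result

      # With transpose_b = true the right operand would be transposed first,
      # i.e. the equivalent of np.matmul(a, b.T).
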
  6. tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc

      Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT);
    
      Output perm = ops::Const(on_cpu.WithOpName("perm"), {3, 1, 2, 0});
    
      {
        Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm);
        Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm);
      }
    
      std::unique_ptr<Graph> result;
      TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result));
    
      OutputTensor tr0_perm;
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 8.4K bytes
  7. tensorflow/compiler/jit/tests/opens2s_gnmt_mixed_precision.golden_summary

     ReverseSequence 1
     Slice 2
     Transpose 3
    cluster 5 size 21
     All 1
     ConcatV2 1
     Const 11
     Equal 1
     ExpandDims 1
     ReverseSequence 1
     Shape 1
     StridedSlice 1
     Transpose 3
    cluster 6 size 11
     Cast 1
     Const 5
     GatherV2 1
     Shape 1
     StridedSlice 1
     Transpose 1
     ZerosLike 1
    cluster 7 size 33
     All 2
     Cast 1
     Const 17
     Equal 2
     ExpandDims 2
     GatherV2 1
    - Last Modified: Fri Jan 06 10:38:14 UTC 2023
    - 5K bytes
  8. tensorflow/compiler/mlir/tensorflow/tests/layout_optimization_to_nhwc.mlir

      // NOFOLD: %[[TRANSPOSE:[0-9]*]] = "tf.Transpose"(%arg0, %[[CST]]) : (tensor<?x224x224x3xf32>, tensor<4xi32>) -> tensor<?x3x224x224xf32>
    
      // Pad input with new paddings.
      // CHECK: %[[PAD:[0-9]*]] = "tf.Pad"(%arg0, %[[PADDINGS]])
      // CHECK-SAME: (tensor<?x224x224x3xf32>, tensor<4x2xi32>) -> tensor<?x230x230x3xf32>
      // NOFOLD: %[[PAD:[0-9]*]] = "tf.Pad"(%[[TRANSPOSE]], %[[PADDING]])
    
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 7.3K bytes
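
    The Pad CHECK line only fixes the total growth of each spatial dimension (224 -> 230). Assuming the usual symmetric split of 3 on each side (an assumption; the %[[PADDINGS]] constant itself is not visible in the snippet), the shape arithmetic works out as follows, with a batch of 1 standing in for the dynamic ? dimension:

      # Shape arithmetic behind the tf.Pad CHECK line above. The symmetric
      # [3, 3] split per spatial dim is assumed; only 224 -> 230 is visible.
      import numpy as np

      x = np.zeros((1, 224, 224, 3), dtype=np.float32)  # batch 1 stands in for '?'
      paddings = ((0, 0), (3, 3), (3, 3), (0, 0))        # N, H, W, C
      print(np.pad(x, paddings).shape)                   # (1, 230, 230, 3)
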
  9. tensorflow/compiler/mlir/lite/tests/tfl_while_outline.mlir

      %cst_9 = arith.constant dense<-1> : tensor<i32>
      %cst_10 = arith.constant dense<2.1> : tensor<8x5xf32>
      %cst_11 = arith.constant dense<2> : tensor<1xi32>
      %cst_12 = arith.constant dense<1> : tensor<1xi32>
      %0 = "tfl.transpose"(%arg0, %cst_1) : (tensor<4x4x3xf32>, tensor<3xi32>) -> tensor<4x4x3xf32>
      %1:6 = "tfl.while"(%cst_7, %cst_7, %cst_2, %cst, %cst, %0) ({
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.5K bytes
  10. tensorflow/compiler/mlir/tfrt/tests/tf_to_corert/tf_to_corert_pipeline.mlir

          %outputs_18, %control_19 = tf_executor.island wraps "tf.MatMul"(%outputs_16, %outputs_4) {device = "", transpose_a = false, transpose_b = false} : (tensor<?x16384xf32>, tensor<*xf32>) -> tensor<?x?xf32>
    - Last Modified: Wed May 08 00:18:59 UTC 2024
    - 7.7K bytes