Results 51 - 60 of 150 for 64xf32 (0.69 sec)

  1. tensorflow/compiler/mlir/lite/tests/split-merged-operands.mlir

    unction = "NONE", time_major = true}> : (tensor<4x4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>, tensor<4x4xf32>) -> tensor<4x4x4xf32>
    
      %0 = "tfl.pseudo_const" () {value =...
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 7.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/flex_op_with_tflite_op.mlir

      %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
      // tf.div is the result of conversion to a Flex TF op
      %2 = "tf.Div"(%1, %0)  : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
      %3 = "tfl.exp"(%2)  : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      func.return %3 : tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 3.9K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/lstm.mlir

      func.return %24 : tensor<1x4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 20.4K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/disable_flex_enable_builtin.mlir

      %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul0")
      %2 = "tfl.mul"(%1, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul1")
      %3 = "tfl.exp"(%2)  : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      func.return %3 : tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 3.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/pruning_function_input_as_output.mlir

    // CHECK: (%[[ARG:.*]]: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>)
    func.func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> attributes {tf.entry_function = {inputs = "mul"}} {
      %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      %1 = "tfl.squared_difference"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 07:35:24 UTC 2022
    - 1.1K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/pruning.mlir

      // CHECK: %[[DIV:.*]] = tfl.div
      %3 = "tfl.div"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
      // CHECK: %[[EXP:.*]] = "tfl.exp"
      %4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      // tfl.neg should be pruned
      // CHECK-NOT: "tfl.neg"
      %5 = "tfl.neg"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 07:35:24 UTC 2022
    - 1.2K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/tests/drop_while_shape_invariant.mlir

        "tf.Yield"(%2) : (tensor<i1>) -> ()
      }, {
      ^body(%barg0: tensor<*xf32>):
        %2 = "tf.SomeOp"(%barg0) : (tensor<*xf32>) -> tensor<*xf32>
        "tf.Yield"(%2) : (tensor<*xf32>) -> ()
      }) {is_stateless = false, shape_invariant} : (tensor<4xf32>) -> (tensor<*xf32>)
    
      func.return %0, %1 : tensor<*xf32>, tensor<*xf32>
    }
    
    // Test that both passes drop the shape_invariant attribute from
    // While/WhileRegion ops within a cluster.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 12:06:33 UTC 2022
    - 2.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/output_arrays.mlir

      // CHECK: %[[DIV:.*]] = tfl.div
      %3 = "tfl.div"(%2, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("div")
      // CHECK: %[[EXP:.*]] = "tfl.exp"
      %4 = "tfl.exp"(%3) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      // tfl.neg should not be pruned
      // CHECK: %[[NEG:.*]] = "tfl.neg"
      %5 = "tfl.neg"(%4) : (tensor<4xf32>) -> tensor<4xf32> loc("neg")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 07:35:24 UTC 2022
    - 1.1K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/math.mlir

      // CHECK-NEXT: }
    
      %0 = "tfl.pseudo_const" () {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      %1 = "tfl.squared_difference"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("squared_difference")
      %2 = "tfl.mul"(%arg0, %1) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 5.6K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/tests/mlir2flatbuffer/svdf.mlir

      %0 = "tfl.pseudo_const" () {value = dense<0.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      %1 = "tfl.svdf"(%arg0, %arg1, %arg2, %arg3, %0) {fused_activation_function = "RELU", rank = 2 : i32} : (tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32>
      func.return %1 : tensor<4xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jul 14 16:41:28 UTC 2022
    - 3.6K bytes
    - Viewed (0)
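Taken together, the TFLite results above (items 2, 4, 5, 9, and 10) share one recurring shape: a func.func over tensor<4xf32> values, a "tfl.pseudo_const" constant, one or two TFLite ops carrying an explicit fused_activation_function attribute, and a func.return. The following minimal sketch is assembled from those fragments for orientation only; it is not the contents of any file listed above.

    func.func @main(%arg0: tensor<4xf32>) -> tensor<4xf32> {
      // Constant operand, as in the "Const" lines in the snippets above.
      %0 = "tfl.pseudo_const"() {value = dense<1.0> : tensor<4xf32>} : () -> tensor<4xf32> loc("Const")
      // Elementwise multiply with no fused activation.
      %1 = "tfl.mul"(%arg0, %0) {fused_activation_function = "NONE"} : (tensor<4xf32>, tensor<4xf32>) -> tensor<4xf32> loc("mul")
      // Unary op chained on the result.
      %2 = "tfl.exp"(%1) : (tensor<4xf32>) -> tensor<4xf32> loc("exp")
      func.return %2 : tensor<4xf32>
    }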