
Results 11 - 20 of 49 for 8x10xf32 (0.13 sec)

  1. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      %2 = "tfl.exp"(%1) : (tensor<8x128xf32>) -> tensor<8x128xf32>
      %3 = "tfl.sum"(%2, %cst) {keep_dims = true} : (tensor<8x128xf32>, tensor<1xi32>) -> tensor<8x1xf32>
      %4 = "tfl.div"(%2, %3) {fused_activation_function = "NONE"} : (tensor<8x128xf32>, tensor<8x1xf32>) -> tensor<8x128xf32>
      func.return %4 : tensor<8x128xf32>
    
    // CHECK-LABEL: SoftMaxWithNormalization
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/lite/tests/lower-static-tensor-list-enable-dynamic-update-slice.mlir

    func.func @tensorlistSetItem(%arg0: tensor<3x10xf32>, %arg1: tensor<1xi32>, %arg2: tensor<i32>, %arg3: tensor<10xf32>) -> tensor<3x10xf32> {
      %0 = "tf.TensorListFromTensor"(%arg0, %arg1) : (tensor<3x10xf32>, tensor<1xi32>) -> tensor<!tf_type.variant<tensor<10xf32>>>
      %1 = "tf.TensorListSetItem"(%0, %arg2, %arg3) : (tensor<!tf_type.variant<tensor<10xf32>>>, tensor<i32>, tensor<10xf32>) -> tensor<!tf_type.variant<tensor<10xf32>>>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 28 14:24:59 UTC 2022
    - 2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_legacy.mlir

    }
    
    // CHECK-LABEL: softmax
    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[SOFTMAX_0:.*]] = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
    // CHECK: return %[[SOFTMAX_0]] : tensor<8x16xf32>
    }
    
    // CHECK-LABEL: conv2d_backprop_input_with_add
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 5.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tests/legalize-tf-variables.mlir

      // CHECK: %[[ADD:.*]] = tfl.add %[[VAR_VAL]], %arg0 {fused_activation_function = "NONE"} : tensor<1x10xf32>
      // CHECK: "tfl.assign_variable"(%[[RESOURCE]], %[[ADD]]) : (tensor<!tf_type.resource<tensor<1x10xf32>>>, tensor<1x10xf32>) -> ()
      // CHECK: %[[RESULT:.*]] = "tfl.read_variable"(%[[RESOURCE]]) : (tensor<!tf_type.resource<tensor<1x10xf32>>>) -> tensor<1x10xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 7.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/function-resource-args-handle-info.mlir

    func.func @main(%arg0: tensor<*x!tf_type.resource<tensor<8x1xf32>>>) -> tensor<8x1xf32> {
      %0 = tf_executor.graph {
         %outputs, %control = tf_executor.island wraps "tf.ReadVariableOp"(%arg0) : (tensor<*x!tf_type.resource<tensor<8x1xf32>>>) -> tensor<8x1xf32>
         tf_executor.fetch %outputs : tensor<8x1xf32>
      }
      func.return %0 : tensor<8x1xf32>
    }
    
    // Check that we generate _handle_dtypes and _handle_shapes for the resource
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 25 12:28:56 UTC 2022
    - 1.2K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/unwrap_xla_call_module_op.mlir

      func.func private @main_0(%arg0: tensor<10x1x3xf32>) -> tensor<3x10xf32> attributes {_from_xla_call_module} {
        %0 = stablehlo.reshape %arg0 : (tensor<10x1x3xf32>) -> tensor<3x10xf32>
        return %0 : tensor<3x10xf32>
      }
      // CHECK: %[[RESHAPE:.*]] = stablehlo.reshape
      // CHECK-NEXT: return %[[RESHAPE]]
    
      // CHECK: @main_1
      func.func private @main_1(%arg0: tensor<3x10xf32>) -> tensor<6x5xf32> {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 08 22:40:14 UTC 2024
    - 3.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/lite/quantization/tensorflow/tests/fallback_to_flex_ops_default.mlir

    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
    // CHECK: %[[CUSTOM_0:.*]] = "tfl.custom"(%arg0) <{custom_code = "FlexSoftmax", custom_option = #tfl<const_bytes : "0x07536F66746D617800161207536F66746D61781A002A070A0154120230013200000221191414042801">}> : (tensor<8x16xf32>) -> tensor<8x16xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 13.4K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/experimental/tac/tests/get-op-cost.mlir

      %1 = "tfl.reshape"(%0, %cst) {tac.device = "GPU"} : (tensor<200xf32>, tensor<2xi64>) -> tensor<2x100xf32>
      func.return %1 : tensor<2x100xf32>
    }
    
    func.func @concat_reshape_CPU(%arg0: tensor<100xf32>, %arg1: tensor<100xf32>) -> tensor<2x100xf32> attributes {tac.device = "CPU", tac.interface_name = "func_2"} {
      %cst = arith.constant dense<[2, 100]> : tensor<2xi64>
      // CHECK: tac.cost = 1.000000e+02
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 24 05:29:10 UTC 2022
    - 5.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/tests/replicate_tensor_list_init_ops.mlir

        "tf.Yield"(%barg1, %barg2) : (tensor<!tf_type.variant<tensor<?x1xf32>>>, tensor<!tf_type.variant<tensor<?x1xf32>>>) -> ()
      }) {is_stateless = false} : (tensor<!tf_type.variant<tensor<?x1xf32>>>, tensor<!tf_type.variant<tensor<?x1xf32>>>) -> (tensor<!tf_type.variant<tensor<?x1xf32>>>, tensor<!tf_type.variant<tensor<?x1xf32>>>)
      func.return
    }
    
    // CHECK: while_region_op_two_sep_args_empty_tensor_list
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sun Jan 22 17:28:34 UTC 2023
    - 8.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/tests/graphdef2mlir/mlir_passthrough_op.pbtxt

    # CHECK: mlir_module = "\0Afunc @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> {\0A %add = \22tf.Add\22(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10xf32>\0A %ret = \22magic.op\22(%add, %add) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32>\0A return %ret : tensor<10x10xf32>\0A}\0A"}> {device = ""} : (tensor<10xf32>, tensor<10xf32>) -> tensor<*xf32>
    
    node {
      name: "x"
      op: "Placeholder"
      attr {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 1.9K bytes
    - Viewed (0)
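
Most of the .mlir files listed above are lit/FileCheck regression tests: a // RUN: line at the top of the file pipes the test through an MLIR pass driver, and FileCheck matches the printed IR against the // CHECK and // CHECK-LABEL comments placed next to each function. The sketch below shows that overall shape for the softmax excerpt seen in results 3 and 7; the tf-opt invocation and the -tfl-legalize-tf pass flag are assumptions for illustration, not copied from the indexed files.

    // Minimal sketch of a FileCheck-style MLIR test (driver binary and pass flag assumed).
    // RUN: tf-opt %s -tfl-legalize-tf | FileCheck %s

    // CHECK-LABEL: @softmax
    func.func @softmax(%arg0: tensor<8x16xf32>) -> tensor<8x16xf32> {
      %0 = "tf.Softmax"(%arg0) : (tensor<8x16xf32>) -> tensor<8x16xf32>
      func.return %0 : tensor<8x16xf32>
      // CHECK: "tfl.softmax"(%arg0)
      // CHECK: return
    }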
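
Result 1 exercises the TFLite optimizer on an explicitly normalized softmax, i.e. exp(x - max(x)) / sum(exp(x - max(x))) computed along the last axis. A minimal sketch of the unfused chain that such a test feeds to the pass is below; the function name, the reduction-axis constant, and the note that the optimize pass can fold this chain into a single tfl.softmax are assumptions inferred from the excerpt, not the full indexed file.

    // Sketch of the unfused "softmax with normalization" chain (names illustrative).
    func.func @softmax_with_normalization(%arg0: tensor<8x128xf32>) -> tensor<8x128xf32> {
      // Reduce along the last axis (dim 1 of 8x128), keeping it as a size-1 dim.
      %cst = arith.constant dense<1> : tensor<1xi32>
      %max = "tfl.reduce_max"(%arg0, %cst) {keep_dims = true} : (tensor<8x128xf32>, tensor<1xi32>) -> tensor<8x1xf32>
      // Subtract the row maximum for numerical stability, then exponentiate.
      %sub = "tfl.sub"(%arg0, %max) {fused_activation_function = "NONE"} : (tensor<8x128xf32>, tensor<8x1xf32>) -> tensor<8x128xf32>
      %exp = "tfl.exp"(%sub) : (tensor<8x128xf32>) -> tensor<8x128xf32>
      // Normalize by the row sum; an optimize pattern can rewrite the whole chain
      // into a single "tfl.softmax" op.
      %sum = "tfl.sum"(%exp, %cst) {keep_dims = true} : (tensor<8x128xf32>, tensor<1xi32>) -> tensor<8x1xf32>
      %div = "tfl.div"(%exp, %sum) {fused_activation_function = "NONE"} : (tensor<8x128xf32>, tensor<8x1xf32>) -> tensor<8x128xf32>
      func.return %div : tensor<8x128xf32>
    }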