Results 51 - 60 of 5,224 for SAME (0.07 sec)

  1. tensorflow/compiler/mlir/lite/tests/flatbuffer2mlir/importer_test_min_max.cc

      // FB-SAME:  -8.0, -9.0, -10.0, -11.0, -12.0, -13.0, -14.0, -15.0, -16.0,
      // FB-SAME:  -17.0, -18.0, -19.0, -20.0, -21.0, -22.0, -23.0, -24.0, -25.0,
      // FB-SAME:  -26.0, -27.0, -28.0, -29.0, -30.0, -31.0, -32.0, -33.0, -34.0,
      // FB-SAME:  -35.0, -36.0, -37.0, -38.0, -39.0 ],
      // FB-NEXT:        max: [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 6.8K bytes
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/lift_quantizable_spots_as_functions_xla.mlir

    // CHECK: "tf.PartitionedCall"
    // CHECK-SAME: f = @composite_depthwise_conv2d_with_bias_and_relu6_fn_1
    // Check that the `_tfl_quant_trait` attribute has been removed.
    // CHECK-NOT: _tfl_quant_trait = "fully_quantizable"
    
    // CHECK-LABEL: private @composite_depthwise_conv2d_with_bias_and_relu6_fn_1
    // CHECK: %[[DEPTHWISECONV2D_0:.*]] = "tf.DepthwiseConv2dNative"(%arg0, %arg1)
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 8.3K bytes
  3. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/basic.py

      # CHECK:      func {{@[a-zA-Z_0-9]+}}(
      # CHECK-SAME:   %arg0: tensor<f32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]},
      # CHECK-SAME:   %arg1: tensor<!tf_type.resource<tensor<f32>>>
      # CHECK-SAME:   %arg2: tensor<!tf_type.resource<tensor<f32>>>
      # CHECK-SAME:   tensor<f32> {tf_saved_model.index_path = []})
      # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
    - Last Modified: Tue Oct 31 08:49:35 UTC 2023
    - 2.5K bytes
  4. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

      func.return %3 : tensor<4x8xf32>
    }
    
    // CHECK-LABEL:   func @select(
    // CHECK-SAME:                 %[[VAL_0:.*]]: tensor<2xi1>,
    // CHECK-SAME:                 %[[VAL_1:.*]]: tensor<2xi32>,
    // CHECK-SAME:                 %[[VAL_2:.*]]: tensor<2xi32>) -> tensor<2xi32> {
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  5. tensorflow/compiler/mlir/tfrt/tests/ifrt/rewrite_cluster_to_ifrt_call.mlir

    // TODO(b/316226111): the printer may not guarantee the same order of fields. Rewrite the checks to be less sensitive to proto serialization formats.
    // -----
    // Non-SPMD: one input and one output
    //
    // CHECK-LABEL: func.func @serving_default(%arg0: tensor<1x3xf32>) -> tensor<1x3xf32> {
    // CHECK-NEXT:  "tf.IfrtCall"(%arg0)
    // CHECK-SAME:       {program_id = [[PROGRAM_ID:.*]] : i64, variable_arg_indices = []}
    // CHECK-SAME:       (tensor<1x3xf32>) -> tensor<1x3xf32>
    - Last Modified: Sat Feb 17 07:28:40 UTC 2024
    - 9K bytes
  6. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_quantized_functions.mlir

    // CHECK-SAME: tf_quant.quantized_ops = ["DepthwiseConv2D", "BiasAdd", "Relu"]
    // CHECK: func private @quantized_matmul_with_bias_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu_fn
    // CHECK: func private @quantized_matmul_with_bias_and_relu6_fn
    // CHECK: func private @quantized_matmul_fn
    // CHECK-SAME: tf_quant.quantized_ops = ["MatMul"]
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 3.3K bytes
  7. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_initialize_variables_in_session_init.mlir

        func.return %1 : tensor<100x50xf32>
      }
    
      // CHECK-LABEL: func @SessionInitializerFunction()
      // CHECK-SAME: tf_saved_model.exported_names = ["SessionInitializerFunction"]
      // CHECK-SAME: tf_saved_model.initializer_type = "restore_op"
      // CHECK: %[[VAR:.*]] = "tf.VarHandleOp"
      // CHECK-SAME: "var1"
      // CHECK: %[[CST:.*]] = arith.constant dense<> : tensor<0xf32>
      // CHECK: "tf.AssignVariableOp"(%[[VAR]], %[[CST]])
    }
    
    - Last Modified: Wed Oct 19 13:38:14 UTC 2022
    - 5.7K bytes
  8. tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/remove_init_variable_v1.py

    # it is being invoked.
    # CHECK: module
    # CHECK-SAME: tf.versions
    # CHECK-SAME: bad_consumers
    # CHECK-SAME: min_consumer
    # CHECK-SAME: producer
    
    # CHECK: "tf_saved_model.global_tensor"() <{is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", type = tensor<1x3xf32>, value = {{.*}} : tensor<1x3xf32>}> : () -> ()
    # CHECK-NOT: session_initializer
    
    # CHECK:      func {{@[a-zA-Z_0-9]+}}(
    - Last Modified: Tue Oct 31 08:49:35 UTC 2023
    - 2.8K bytes
  9. tensorflow/compiler/mlir/quantization/tensorflow/tests/prepare_quantize_ptq.mlir

    // CHECK: %[[q2:.*]] = "quantfork.qcast"(%[[reshape]])
    // CHECK-SAME: quant.uniform<i8:f32, 0.010039215461880554:-1>
    // CHECK: %[[dq2:.*]] = "quantfork.dcast"(%[[q2]])
    // CHECK-SAME: quant.uniform<i8:f32, 0.010039215461880554:-1>
    // CHECK: %[[call:.*]] = "tf.PartitionedCall"(%[[dq2]]
    // CHECK-SAME: f = @composite_matmul_with_bias_fn_1
    // CHECK: %[[q3:.*]] = "quantfork.qcast"(%[[call]])
    // CHECK-SAME: quant.uniform<i8:f32, 0.015686274509803921:-1>
    - Last Modified: Wed Feb 01 10:21:29 UTC 2023
    - 9.1K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composite_functions.mlir

    // CHECK-SAME: f = @quantize_i8
    // CHECK: %[[conv_quant:.*]] = "tf.PartitionedCall"(%[[quantize]], %[[w_quant]], %[[b_quant]],
    // CHECK-SAME: %[[in_scale]], %[[in_zp]], %[[w_scale]], %[[w_zp]],
    // CHECK-SAME: %[[b_scale]], %[[w_zp]], %[[out_scale]], %[[out_zp]])
    // CHECK-SAME: f = @quantized_conv2d_with_bias_and_relu6_fn_0
    - Last Modified: Mon Nov 06 01:23:21 UTC 2023
    - 15.2K bytes
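
Every hit above exercises FileCheck's `-SAME` directive suffix (`CHECK-SAME`, or `FB-SAME` under a custom check prefix): it continues matching on the same output line where the previous directive stopped, so long argument lists and attributes can be verified piece by piece. A minimal, self-contained sketch of that pattern (hypothetical file, function, and capture names; assumes `mlir-opt` and `FileCheck` are run via lit, as in the tests listed above):

    // RUN: mlir-opt %s | FileCheck %s

    // The LABEL directive anchors on the function header; each -SAME line
    // keeps matching on that same printed line, one argument at a time.
    // CHECK-LABEL: func.func @add(
    // CHECK-SAME:    %[[ARG0:.*]]: tensor<4xf32>,
    // CHECK-SAME:    %[[ARG1:.*]]: tensor<4xf32>) -> tensor<4xf32>
    func.func @add(%arg0: tensor<4xf32>, %arg1: tensor<4xf32>) -> tensor<4xf32> {
      // CHECK: arith.addf %[[ARG0]], %[[ARG1]]
      %0 = arith.addf %arg0, %arg1 : tensor<4xf32>
      return %0 : tensor<4xf32>
    }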