Results 1 - 10 of 144 for mhlo (0.24 sec)

  1. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf.mlir

      // CHECK-NEXT: %[[cmul:.*]] = mhlo.convert %[[mul]] : tensor<8x8x8x8xf32>
      // CHECK-NEXT: %[[init:.*]] = mhlo.constant dense<-0.000000e+00> : tensor<f32>
      // CHECK-NEXT: %[[red1:.*]] = mhlo.reduce(%[[cmul]] init: %[[init]]) applies mhlo.add across dimensions = [0, 1, 2] : (tensor<8x8x8x8xf32>, tensor<f32>) -> tensor<8xf32>
      // CHECK-NEXT: %[[scr2:.*]] = mhlo.convert %[[red1]] : tensor<8xf32>
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 335.5K bytes
  2. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo.mlir

        %9 = "mhlo.compare"(%arg1, %arg3) {comparison_direction = #mhlo<comparison_direction EQ>} : (tensor<f32>, tensor<f32>) -> tensor<i1>
        %10 = "mhlo.compare"(%arg2, %arg4) {comparison_direction = #mhlo<comparison_direction LT>} : (tensor<i32>, tensor<i32>) -> tensor<i1>
        %11 = mhlo.and %9, %10 : tensor<i1>
        %12 = mhlo.or %7, %11 : tensor<i1>
        %13 = "mhlo.select"(%12, %arg2, %arg4) : (tensor<i1>, tensor<i32>, tensor<i32>) -> tensor<i32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 40.1K bytes
  3. tensorflow/compiler/mlir/tf2xla/tests/legalize-tf-quant.mlir

      // CHECK: %[[QUANT0:.*]] = mhlo.uniform_quantize %[[ARG:.*]] : (tensor<?xf32>) -> tensor<?x!quant.uniform<i8:f32, 1.000000e+00:3>>
      // CHECK: %[[CONVERT_1:.*]] = mhlo.bitcast_convert %[[QUANT0]] : (tensor<?x!quant.uniform<i8:f32, 1.000000e+00:3>>) -> tensor<?xi8>
      // CHECK: mhlo.while()
      // CHECK: cond
      // CHECK: %[[CHECK_RES:.*]] = chlo.broadcast_compare
      // CHECK: mhlo.return %[[CHECK_RES]] : tensor<i1>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 01:25:29 UTC 2024
    - 37.3K bytes
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/tf_stablehlo_pass.cc

    #include "tensorflow/compiler/mlir/tf2xla/transforms/passes.h"
    #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
    #include "xla/mlir_hlo/mhlo/IR/register.h"
    #include "xla/mlir_hlo/mhlo/transforms/passes.h"
    #include "xla/mlir_hlo/mhlo/transforms/rewriters.h"
    #include "xla/mlir_hlo/mhlo/utils/type_conversion.h"
    
    namespace mlir {
    namespace odml {
    
    class TFToMhloPass
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 7.5K bytes
  5. tensorflow/compiler/mlir/lite/stablehlo/tests/tfl_legalize_hlo_custom_call.mlir

    func.func @mhlo_custom_call_test__dont_legalize_dict_backend_config(%arg0: tensor<1x4xf32>) -> tensor<1x8xf32> {
      %0 = mhlo.custom_call @custom_call.my_custom_op(%arg0) {
        api_version = 4 : i32,
        backend_config = {foo = "bar"}
      } : (tensor<1x4xf32>) -> (tensor<1x8xf32>)
      func.return %0 : tensor<1x8xf32>
    
      //       CHECK: %0 = mhlo.custom_call @custom_call.my_custom_op(%arg0) {
      //  CHECK-SAME:   api_version = 4 : i32,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 2.4K bytes
  6. tensorflow/compiler/mlir/lite/stablehlo/tests/legalize_hlo.mlir

    func.func @sigmoid(%arg0: tensor<2xf32>) -> tensor<2xf32> {
      %0 = mhlo.constant dense<5.000000e-01> : tensor<f32>
      %1 = mhlo.constant dense<2> : tensor<1xi64>
      %2 = mhlo.constant dense<5.000000e-01> : tensor<2xf32>
      %3 = mhlo.multiply %arg0, %2 : tensor<2xf32>
      %4 = "mhlo.tanh"(%3) : (tensor<2xf32>) -> tensor<2xf32>
      %5 = mhlo.multiply %4, %2 : tensor<2xf32>
      %6 = mhlo.add %5, %2 : tensor<2xf32>
      func.return %6 : tensor<2xf32>
    }
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 29 07:26:59 UTC 2024
    - 340.2K bytes
  7. tensorflow/compiler/mlir/tf2xla/transforms/tf2xla_rewriter.h

      // Compiles the given Operation with XlaBuilder and imports the generated HLO
      // via the HLO -> MHLO importer.
      absl::StatusOr<mhlo::TupleOp> CompileWithHloImporter(
          tensorflow::OpKernelContext& op_context);
    
      // Import the given XlaComputation into the parent module. Returns the given
      // generated function.
      absl::StatusOr<mhlo::TupleOp> ImportXlaComputation(
          xla::XlaComputation& computation);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 02 09:16:07 UTC 2024
    - 5K bytes
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

      return b.createOrFold<mhlo::DynamicBroadcastInDimOp>(
          broadcast_to_type, epsilon, shape_value, dims);
    }
    
    class UnfuseBatchNormTrainingPattern
        : public OpRewritePattern<mhlo::BatchNormTrainingOp> {
     public:
      using OpRewritePattern<mhlo::BatchNormTrainingOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(mhlo::BatchNormTrainingOp bn_op,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/tests/composite-lowering.mlir

      %8 = mhlo.divide %7, %2 : tensor<64xf32>
      %9 = mhlo.floor %8 : tensor<64xf32>
      %10 = mhlo.convert %9 : (tensor<64xf32>) -> tensor<64xi32>
      %11 = mhlo.compare  LT, %10, %1,  SIGNED : (tensor<64xi32>, tensor<64xi32>) -> tensor<64xi1>
      %12 = mhlo.add %10, %0 : tensor<64xi32>
      %13 = mhlo.select %11, %12, %10 : tensor<64xi1>, tensor<64xi32>
      %14 = mhlo.reshape %13 : (tensor<64xi32>) -> tensor<64x1xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 32.6K bytes
  10. tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h

    namespace mlir {
    namespace odml {
    
    // Creates a pass which unfuses MHLO batch norm inference op into arithmetic
    // ops.
    std::unique_ptr<Pass> createUnfuseBatchNormPass();
    
    // Creates a pass which constant folds broadcast_in_dim op conditionally.
    std::unique_ptr<Pass> createFoldBroadcastPass();
    
    // Creates a pass which fuses MHLO binary element-wise ops and convolution op.
    std::unique_ptr<Pass> createFuseConvolutionPass();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 21:59:06 UTC 2024
    - 3.2K bytes
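The comments in result 10's passes.h describe pass factory functions that are meant to be composed into an MLIR pass pipeline. The sketch below is an illustration only, not code from the indexed repository: the helper name AddUnfuseAndFoldPasses, the pipeline ordering, and the nesting on func::FuncOp are assumptions made for the example.

    // Minimal sketch: wiring the factory functions declared in passes.h
    // into an MLIR pass pipeline. Nesting on func::FuncOp is an assumption;
    // the real odml pipeline may schedule these passes differently.
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/passes.h"

    void AddUnfuseAndFoldPasses(mlir::PassManager& pm) {
      // Each create*Pass() returns a std::unique_ptr<mlir::Pass> that the
      // pass manager takes ownership of.
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::odml::createUnfuseBatchNormPass());
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::odml::createFoldBroadcastPass());
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::odml::createFuseConvolutionPass());
    }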