Results 1 - 3 of 3 for OptimizeQuantizedOpToFloat (0.43 sec)

  1. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h

    // quantize pair around the op.
    void ConvertQuantizedOpToFloat(func::FuncOp func, OpBuilder* builder);
    
    // This will optimize the quantized ops -> float graph.
    void OptimizeQuantizedOpToFloat(func::FuncOp func, MLIRContext* context);
    
    }  // namespace tac
    }  // namespace TFL
    }  // namespace mlir
    
    - Last Modified: Tue Jun 07 18:43:51 UTC 2022
    - 2K bytes
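    The two declarations above are meant to be used as a pair (result 3 below shows the call site): ConvertQuantizedOpToFloat wraps each quantized op in a dequantize/quantize pair, and OptimizeQuantizedOpToFloat then folds the redundant pairs away. A minimal sketch of that call sequence, assuming it runs inside an MLIR pass; the wrapper name RewriteToFloat is hypothetical:

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/MLIRContext.h"
    #include "tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h"

    // Hypothetical wrapper: run both TAC entry points back to back on one
    // function, mirroring the usage in get_alternative_subgraph.cc below.
    void RewriteToFloat(mlir::func::FuncOp func, mlir::MLIRContext* context) {
      mlir::OpBuilder builder(func);
      // Surround each quantized op with a dequantize/quantize pair.
      mlir::TFL::tac::ConvertQuantizedOpToFloat(func, &builder);
      // Fold away the redundant quantize/dequantize pairs this introduced.
      mlir::TFL::tac::OptimizeQuantizedOpToFloat(func, context);
    }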
  2. tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc

        if (!quant_op.getResult().use_empty()) return failure();
    
        rewriter.eraseOp(quant_op);
        return success();
      }
    };
    
    void OptimizeQuantizedOpToFloat(func::FuncOp func, MLIRContext* context) {
      RewritePatternSet patterns(func.getContext());
      patterns
          .add<FoldQuantizedI32ToFloat, FoldQuantizeDequantize, RemoveUnusedQuant>(
              context);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.4K bytes
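    The excerpt is cut off right after the patterns are registered; the file's actual tail is not shown here. In MLIR, a RewritePatternSet built this way is normally handed to the greedy rewrite driver, so a plausible sketch of the remaining step looks like the following (the helper name ApplyGreedily is hypothetical):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/PatternMatch.h"
    #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

    // Hypothetical helper: apply a populated pattern set (such as the
    // FoldQuantizedI32ToFloat / FoldQuantizeDequantize / RemoveUnusedQuant set
    // above) to a function until no pattern matches anymore.
    void ApplyGreedily(mlir::func::FuncOp func, mlir::RewritePatternSet patterns) {
      (void)mlir::applyPatternsAndFoldGreedily(func, std::move(patterns));
    }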
  3. tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc

          target_device_inference_type.inference_type == FLOAT) {
        OpBuilder cloned_func_builder(cloned_func);
        ConvertQuantizedOpToFloat(cloned_func, &cloned_func_builder);
        OptimizeQuantizedOpToFloat(cloned_func, &getContext());
      }
    
      Optimize(cloned_func, target_device_inference_type.hardware);
    
      // Set device for each op.
      cloned_func.walk([&](Operation* op) {
    - Last Modified: Tue Jun 06 03:08:33 UTC 2023
    - 12.3K bytes
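    The walk that closes this excerpt is where the per-op device annotation happens ("Set device for each op."). A minimal sketch of what such a walk can look like; the attribute key "tac.device" and the helper name AnnotateDevice are assumptions for illustration, not taken from the truncated snippet:

    #include "llvm/ADT/StringRef.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Operation.h"

    // Hypothetical walk: tag every op in the cloned function with the target
    // hardware so later passes can partition the graph per device.
    void AnnotateDevice(mlir::func::FuncOp cloned_func, llvm::StringRef hardware) {
      cloned_func.walk([&](mlir::Operation* op) {
        // Assumed attribute key; the real key lives in the TAC common headers.
        op->setAttr("tac.device",
                    mlir::StringAttr::get(op->getContext(), hardware));
      });
    }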