Results 21 - 30 of 1,016 for rewrites (0.2 sec)

  1. tensorflow/compiler/mlir/tf2xla/transforms/tf_xla_passes.td

      let constructor = "::mlir::mhlo::CreateLegalizeTFCommunicationPass()";
      let description = [{
        A pass that legalizes TF/XLA communication ops, propagates their respective
        tokens (for ordering), and rewrites their respective functions and control
        flow ops when necessary.
    
        For example, given the program
    
        ```mlir
          func @send_to_host(%arg0: tensor<i32>) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 03 05:56:39 UTC 2023
    - 1.7K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/transforms/tf_device_passes.td

      let dependentDialects = ["tf_device::TensorFlowDeviceDialect"];
    }
    
    def XlaRewritePass : Pass<"tf-xla-rewrite", "mlir::ModuleOp"> {
      let summary = "Rewrites partition calls into Xla launch ops to make the attached function run on XLA.";
    
      let description = [{
        This pass rewrites `tf.PartitionedCall` and `tf.StatefulPartitionedCall`
        operations with `_xla_compile_device_type` attribute in a
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 17 18:52:57 UTC 2024
    - 12.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tfr/passes/passes.h

                                          RewritePatternSet &patterns);
    
    // Decompose ops.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass(
        std::optional<ModuleOp> tfr_module = std::nullopt);
    
    // Rewrites quantized operands and results with their storage types.
    // This pass should be run at module level after decomposition, if there are
    // quantized operands or results.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 08 01:19:25 UTC 2023
    - 2K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/tensorlist_patterns.td

    include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.td"
    
    //===----------------------------------------------------------------------===//
    // TensorList transformation patterns.
    // Note that the pattern below rewrites `TensorList` tensors (which have type DT_VARIANT)
    // into regular tensors. We also assume that each element in the `TensorList` has
    // the same constant shape.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 16 23:20:46 UTC 2022
    - 1.5K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc

    See the License for the specific language governing permissions and
    limitations under the License.
    ==============================================================================*/
    
    // Rewrites ops that require quantized inputs or outputs to ops that allow
    // non-quantized inputs and outputs.
    
    #include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
    #include "mlir/Pass/Pass.h"  // from @llvm-project
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Oct 05 23:50:19 UTC 2022
    - 1.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.td

    ==============================================================================*/
    
    include "mlir/Pass/PassBase.td"
    
    def TPURewritePass : Pass<"tf-tpu-rewrite", "mlir::ModuleOp"> {
      let summary = "Rewrites a `tf_device.cluster_func` on TPUs into TPU runtime operations.";
    
      let description = [{
        This pass rewrites a `tf_device.cluster_func` operation into a sequence of `tf._TPUCompileMlir`
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jan 10 18:58:57 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h

    #include <memory>
    
    #include "llvm/ADT/StringRef.h"
    #include "mlir/IR/BuiltinOps.h"  // from @llvm-project
    #include "mlir/Pass/Pass.h"  // from @llvm-project
    
    namespace mlir {
    namespace TFTPU {
    
    // Creates a pass that rewrites `tf_device.launch_func` on TPUs into TPU runtime
    // ops.
    std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateTPURewritePass(
        llvm::StringRef module_name = llvm::StringRef());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jan 10 18:58:57 UTC 2024
    - 2.1K bytes
    - Viewed (0)
  8. tensorflow/compiler/jit/xla_compile_on_demand_op.h

    #include "tensorflow/core/framework/types.h"
    #include "tensorflow/core/lib/core/status.h"
    
    namespace tensorflow {
    
    // An OpKernel that compiles an op to an XLA computation and runs it. Unlike
    // XlaLaunch this doesn't rely on any rewrites of the graphdef - it will run a
    // vanilla TensorFlow op as long as the bridge supports it.
    class XlaCompileOnDemandOp : public OpKernel {
     public:
      explicit XlaCompileOnDemandOp(OpKernelConstruction* ctx)
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 3.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/convert_launch_func_to_tf_call.cc

    namespace mlir {
    namespace TFDevice {
    
    namespace {
    
    #define GEN_PASS_DEF_CONVERTLAUNCHFUNCTOTFCALLPASS
    #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc"
    
    // Rewrites tf_device::LaunchFuncOp into TF::PartitionedCallOp.
    struct ConvertLaunchFuncToTFCallPass
        : public impl::ConvertLaunchFuncToTFCallPassBase<
              ConvertLaunchFuncToTFCallPass> {
      void runOnOperation() override;
    };
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Aug 31 21:08:09 UTC 2023
    - 2.8K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/quantization/tensorflow/quantize_preprocess.cc

      pm.addPass(mlir::createInlinerPass());
      pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
      pm.addPass(mlir::quant::CreateCastBf16OpsToF32Pass());
    
      // Optimizes the graph via cleanups, merges, rewrites, constant folding,
      // and edge case handling where possible.
      pm.addNestedPass<mlir::func::FuncOp>(
          mlir::TF::CreateDropWhileShapeInvariantPass());
      pm.addNestedPass<mlir::func::FuncOp>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 9.8K bytes
    - Viewed (0)
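Results 7 and 10 together show the usual wiring behind these rewrites: a header such as runtime_passes.h declares a pass factory (CreateTPURewritePass), and a pipeline file such as quantize_preprocess.cc adds the returned passes to an mlir::PassManager. The following is a minimal sketch of that wiring, not code from the repository; the RunTpuRewritePipeline wrapper is invented, and only the factory functions and include paths quoted in the results above are assumed to exist.

```c++
// Minimal sketch (not repository code) of how the pass factories surfaced in
// results 7 and 10 are typically strung together. RunTpuRewritePipeline is an
// invented name; the factories and includes come from the snippets above.
#include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "mlir/Pass/PassManager.h"  // from @llvm-project
#include "mlir/Support/LogicalResult.h"  // from @llvm-project
#include "mlir/Transforms/Passes.h"  // from @llvm-project
#include "tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/runtime_passes.h"

mlir::LogicalResult RunTpuRewritePipeline(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());

  // Generic cleanups first, mirroring quantize_preprocess.cc (result 10).
  pm.addPass(mlir::createInlinerPass());
  pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());

  // The TPU rewrite declared in runtime_passes.h (result 7) is a module-level
  // pass, so it is added with addPass rather than addNestedPass.
  pm.addPass(mlir::TFTPU::CreateTPURewritePass());

  return pm.run(module);
}
```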
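The TableGen entries in results 2 and 6 also register textual names for these passes ("tf-xla-rewrite", "tf-tpu-rewrite"), so the same rewrites can be driven through MLIR's textual pipeline parser instead of the C++ factories. A hedged sketch, assuming the passes have already been registered with the global pass registry (e.g. in a tool's main()):

```c++
// Sketch only: runs the "tf-tpu-rewrite" pass (registered by the TableGen
// entry quoted in result 6) by its textual name.
#include "mlir/IR/BuiltinOps.h"  // from @llvm-project
#include "mlir/Pass/PassManager.h"  // from @llvm-project
#include "mlir/Pass/PassRegistry.h"  // from @llvm-project

mlir::LogicalResult RunByName(mlir::ModuleOp module) {
  // Anchor the pass manager on builtin.module so the module-level pass can be
  // named directly in the pipeline string.
  mlir::PassManager pm =
      mlir::PassManager::on<mlir::ModuleOp>(module.getContext());
  if (mlir::failed(mlir::parsePassPipeline("tf-tpu-rewrite", pm)))
    return mlir::failure();
  return pm.run(module);
}
```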
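Several of the results (the truncated populate declaration in result 3 and the TableGen patterns in result 4) refer to the RewritePatternSet mechanism that most of these rewrites are built on. The skeleton below is an illustration only: PopulateExampleRewritePatterns and ExampleRewritePass are invented names, and the commented-out patterns.add call stands in for whatever concrete patterns a real pass would register.

```c++
// Illustration of the RewritePatternSet-based structure referenced in results
// 3 and 4; every "Example*" name here is hypothetical.
#include "mlir/Dialect/Func/IR/FuncOps.h"  // from @llvm-project
#include "mlir/IR/PatternMatch.h"  // from @llvm-project
#include "mlir/Pass/Pass.h"  // from @llvm-project
#include "mlir/Support/TypeID.h"  // from @llvm-project
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"  // from @llvm-project

namespace {

// Populate-style entry point, matching the shape of the truncated declaration
// in result 3 (`..., RewritePatternSet &patterns)`).
void PopulateExampleRewritePatterns(mlir::MLIRContext *context,
                                    mlir::RewritePatternSet &patterns) {
  // Concrete OpRewritePattern subclasses, or patterns generated from a .td
  // file as in result 4, would be registered here:
  //   patterns.add<SomeConcretePattern>(context);
  (void)context;
  (void)patterns;
}

// Function-level pass that applies the populated patterns greedily.
struct ExampleRewritePass
    : public mlir::PassWrapper<ExampleRewritePass,
                               mlir::OperationPass<mlir::func::FuncOp>> {
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ExampleRewritePass)

  void runOnOperation() override {
    mlir::RewritePatternSet patterns(&getContext());
    PopulateExampleRewritePatterns(&getContext(), patterns);
    if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
                                                        std::move(patterns))))
      signalPassFailure();
  }
};

}  // namespace
```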