Results 1 - 10 of 4,138 for fusing (0.32 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc

    // need/usage. File a bug to request porting over additional fusions.
    
    // TODO(b/158265178): Support GPU-specific fusions.
    // TODO(b/158266710): Support CPU MKL configurations.
    
    #define GEN_PASS_DEF_FUSEDKERNELMATCHERPASS
    #include "tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.h.inc"
    
    // Optimizes TF computations by fusing subgraphs/nodes onto more efficient
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.9K bytes
    - Viewed (0)
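
    The matcher above targets chains such as Conv2D followed by BiasAdd and an
    activation, which it collapses into a single fused kernel. A minimal sketch
    of that unfused pattern in TensorFlow Python (the function name and shapes
    are illustrative assumptions, not from the source file):

        import tensorflow as tf

        @tf.function
        def conv_bias_relu(x, filters, bias):
          # The three-op chain a fused kernel matcher can collapse into one
          # fused convolution kernel.
          y = tf.nn.conv2d(x, filters, strides=1, padding='SAME')
          y = tf.nn.bias_add(y, bias)
          return tf.nn.relu(y)

        x = tf.random.normal([1, 8, 8, 3])
        w = tf.random.normal([3, 3, 3, 16])
        b = tf.zeros([16])
        print(conv_bias_relu(x, w, b).shape)  # (1, 8, 8, 16)
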
  2. tensorflow/compiler/mlir/quantization/stablehlo/passes/unwrap_xla_call_module_op.cc

     private:
      void runOnOperation() override;
    };
    
    void UnwrapXlaCallModuleOp(TF::XlaCallModuleOp call_op,
                               SymbolTable& symbol_table) {
      // Do not inline lifted quantized functions used for fusing patterns.
      // TODO - b/310539922: Remove reference to TF/TFL utils.
      if (call_op->hasAttr(kQuantTraitAttrName)) {
        return;
      }
    
      auto function_name = call_op
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 4.8K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

        return "quant-prepare-lifting";
      }
    
      StringRef getDescription() const final {
        // This is a brief description of the pass.
        return "Apply graph optimizations such as fusing and constant folding to "
               "prepare lifting.";
      }
    
      void getDependentDialects(DialectRegistry& registry) const override {
        registry.insert<TF::TensorFlowDialect, arith::ArithDialect>();
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

            mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));
      }
      pass_manager.addNestedPass<mlir::func::FuncOp>(
          mlir::TFL::CreateOptimizeOpOrderPass());
      // Add optimization pass after quantization for additional fusing
      // opportunities.
    
      if (!pass_config.unfold_batch_matmul) {
        // Enable an optimization pass that transforms FC to BatchMatmul only when
        // `unfold_batch_matmul=false`.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
    - Viewed (0)
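
    For context, the pass pipeline above runs during TFLite conversion. A
    minimal, hedged sketch of driving it from Python through the public
    converter API (the toy Keras model is an assumption for illustration):

        import tensorflow as tf

        model = tf.keras.Sequential([
            tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)),
            tf.keras.layers.Dense(4),
        ])
        # convert() drives the MLIR pass pipeline shown above, including the
        # optimization passes that create additional fusing opportunities.
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_model = converter.convert()
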
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.h

        // Safeguard check to ensure that there is at least one quantizable op.
        if (failed(candidate_ops) || candidate_ops->empty()) return failure();
    
        // Rewrite the floating-point ops to the quantized version, by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* candidate_op : *candidate_ops) {
          // If it is a requantize op, we shouldn't rewrite this op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
    - Viewed (0)
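
    The rewrite described above fuses a dequantize -> float op -> quantize
    sandwich into a single quantized op. An illustrative fake-quant version of
    that sandwich (values, shapes, and ranges are assumptions):

        import tensorflow as tf

        x = tf.constant([[0.1, 0.7], [0.3, 0.9]])
        # Simulated quantize/dequantize pair around a floating-point op; the
        # pattern rewrite fuses the pair into the op itself.
        xq = tf.quantization.fake_quant_with_min_max_args(
            x, min=0.0, max=1.0, num_bits=8)
        y = tf.matmul(xq, xq)  # float op between the (de)quantize pair
        yq = tf.quantization.fake_quant_with_min_max_args(
            y, min=0.0, max=2.0, num_bits=8)
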
  6. tensorflow/compiler/mlir/lite/tests/optimize.mlir

      func.return %7: tensor<1xf32>
    
    // Fusing-LABEL: FusingaddRelu
    // Fusing:  %[[add:[0-9].*]] = tfl.add %arg0, %arg1 {fused_activation_function = "NONE"} : tensor<1xf32>
    // Fusing:  %[[add1:[0-9].*]] = tfl.add %arg0, %[[add]] {fused_activation_function = "RELU"} : tensor<1xf32>
    // Fusing:  %[[relu:[0-9].*]] = "tfl.relu"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 284.1K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/passes.h

    std::unique_ptr<OperationPass<ModuleOp>>
    CreateLiftQuantizableSpotsAsFunctionsPass(
        const tensorflow::quantization::QuantizationOptions& quant_options);
    
    // Apply graph optimizations such as fusing and constant folding to prepare
    // lifting.
    std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareLiftingPass(
        tensorflow::quantization::OpSet target_opset);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 12.3K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc

        auto users = op.getResult().getUsers();
        quantizing_ops.append(users.begin(), users.end());
    
        bool changed = false;
        // Rewrite the floating-point ops to the quantized version, by fusing
        // preceding dequantize ops and succeeding quantize ops.
        for (Operation* quantizing_op : quantizing_ops) {
          // If it is a requantize op, we shouldn't rewrite this op.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 22 05:52:39 UTC 2024
    - 23.6K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

                data_format='NHWC',
                name='sample/conv',
            )
            if bias_fn is not None:
              out = nn_ops.bias_add(out, self.bias)
            if has_batch_norm:
          # Fusing is supported for the non-training case.
              out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
                  out, scale, offset, mean, variance, is_training=False
              )
            if activation_fn is not None:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
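
    A minimal sketch of the inference-mode fused batch norm the test exercises,
    using the public v1 API (shapes and statistics are assumptions for
    illustration):

        import tensorflow as tf

        x = tf.random.normal([1, 4, 4, 2])
        scale, offset = tf.ones([2]), tf.zeros([2])
        mean, variance = tf.zeros([2]), tf.ones([2])
        # is_training=False selects the non-training kernel, matching the
        # "Fusing is supported for the non-training case" comment above.
        y, _, _ = tf.compat.v1.nn.fused_batch_norm(
            x, scale, offset, mean=mean, variance=variance, is_training=False)
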
  10. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

                padding='SAME',
                data_format='NHWC',
            )
            if has_bias:
              out = nn_ops.bias_add(out, self.bias)
            if has_batch_norm:
          # Fusing is supported for the non-training case.
              out, _, _, _, _, _ = nn_ops.fused_batch_norm_v3(
                  out, scale, offset, mean, variance, is_training=False
              )
            if activation_fn is not None:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)