Results 11 - 20 of 116 for callFoo (0.1 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_cfg.cc

      Block* then_block = builder.createBlock(merge_block);
      Operation* call_op = CallFn(loc, get_operand, op.then_function(), &builder);
    
      auto get_then_result = [&](int i) { return call_op->getResult(i); };
      JumpToBlock(loc, get_then_result, merge_block, &builder);
    
      // Set up the 'else' block.
      Block* else_block = builder.createBlock(merge_block);
      call_op = CallFn(loc, get_operand, op.else_function(), &builder);
    
    - Last Modified: Fri Jan 13 11:42:59 UTC 2023
    - 12.2K bytes
  2. tensorflow/compiler/mlir/tensorflow/transforms/add_functions_for_exported_names.cc

          other.addEntryBlock();
          OpBuilder builder(other.getRegion());
          auto call_op = builder.create<mlir::func::CallOp>(
              f.getLoc(), f.getFunctionType().getResults(), f.getSymName(),
              other.getRegion().getArguments());
          builder.create<mlir::func::ReturnOp>(f.getLoc(), call_op.getResults());
        }
    
        Unexport(f);
      }
    }
    
    }  // namespace
    
    - Last Modified: Tue Dec 19 08:06:04 UTC 2023
    - 4.5K bytes
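    The pass above creates a new function whose entry block simply forwards its
    arguments to the original function through a func::CallOp and returns the call
    results through a func::ReturnOp. Below is a minimal, hedged sketch of that
    forwarding pattern using only upstream MLIR; the module setup, function names,
    and types are illustrative and not part of TensorFlow's pass.

      // Sketch: wrap @callee in a @wrapper that forwards its arguments via func.call.
      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/Builders.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/IR/MLIRContext.h"
      #include "mlir/IR/OwningOpRef.h"

      int main() {
        mlir::MLIRContext context;
        context.loadDialect<mlir::func::FuncDialect>();

        mlir::OpBuilder builder(&context);
        mlir::Location loc = builder.getUnknownLoc();
        mlir::OwningOpRef<mlir::ModuleOp> module = mlir::ModuleOp::create(loc);
        builder.setInsertionPointToEnd(module->getBody());

        // Callee: func.func private @callee(%arg0: i32) -> i32 { return %arg0 }
        mlir::Type i32 = builder.getI32Type();
        mlir::FunctionType fn_type = builder.getFunctionType({i32}, {i32});
        auto callee = builder.create<mlir::func::FuncOp>(loc, "callee", fn_type);
        callee.setPrivate();
        mlir::Block* callee_body = callee.addEntryBlock();
        builder.setInsertionPointToEnd(callee_body);
        builder.create<mlir::func::ReturnOp>(loc, callee_body->getArgument(0));

        // Wrapper with the same signature: its entry block calls @callee with its
        // own arguments and returns the call results, as in the pass above.
        builder.setInsertionPointToEnd(module->getBody());
        auto wrapper = builder.create<mlir::func::FuncOp>(loc, "wrapper", fn_type);
        mlir::Block* wrapper_body = wrapper.addEntryBlock();
        builder.setInsertionPointToEnd(wrapper_body);
        auto call_op = builder.create<mlir::func::CallOp>(
            loc, callee, wrapper_body->getArguments());
        builder.create<mlir::func::ReturnOp>(loc, call_op.getResults());

        module->dump();
        return 0;
      }

    The real pass also adjusts exported names and visibility (note the Unexport(f)
    call in the snippet); the sketch only shows the call-and-return plumbing.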
  3. tensorflow/compiler/mlir/lite/experimental/common/outline_operations.h

    // `ExtractSubgraphToFunc` adds exactly two "new" `Operations`, a FuncOp and
    // a CallOp. Pass these back to the caller for setting more specific attributes
    // after graph mutation has taken place.
    struct OpsAdded {
      mlir::func::FuncOp func_op;
      mlir::func::CallOp call_op;
    };
    
    // Given a `Subgraph` containing a sequence of adjacent `Operations` from
    - Last Modified: Thu Nov 17 18:49:43 UTC 2022
    - 6K bytes
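    The comment above says the two new ops are handed back so the caller can set
    further attributes on them. A hypothetical illustration of such a caller follows;
    the struct is re-declared locally so the sketch is self-contained, and the
    attribute names are invented, not ones used by the real pass.

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/Builders.h"

      // Mirrors the OpsAdded struct from outline_operations.h above.
      struct OpsAdded {
        mlir::func::FuncOp func_op;
        mlir::func::CallOp call_op;
      };

      // Tag both newly created ops so later passes can recognize the outlined pair.
      // The attribute names here are placeholders.
      void TagOutlinedOps(OpsAdded& ops_added, mlir::OpBuilder& builder) {
        ops_added.func_op->setAttr("example.outlined", builder.getUnitAttr());
        ops_added.call_op->setAttr("example.outlined_call", builder.getUnitAttr());
      }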
  4. tensorflow/compiler/mlir/quantization/stablehlo/passes/unwrap_xla_call_module_op.cc

    };
    
    void UnwrapXlaCallModuleOp(TF::XlaCallModuleOp call_op,
                               SymbolTable& symbol_table) {
      // Do not inline lifted quantized functions used for fusing patterns.
      // TODO - b/310539922: Remove reference to TF/TFL utils.
      if (call_op->hasAttr(kQuantTraitAttrName)) {
        return;
      }
    
      auto function_name = call_op
                               ->getAttrOfType<FlatSymbolRefAttr>(
    - Last Modified: Tue Mar 05 07:39:40 UTC 2024
    - 4.8K bytes
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/restore_function_name.cc

    void RestoreFunctionNameFromXlaCallModuleOp(TF::XlaCallModuleOp& call_op,
                                                SymbolTable& symbol_table) {
      if (!call_op->hasAttr(kOriginalStablehloEntryFunctionAttrName)) {
        return;
      }
    
      const auto original_function_name = call_op->getAttrOfType<StringAttr>(
          kOriginalStablehloEntryFunctionAttrName);
      const auto current_function_name = call_op->getAttrOfType<FlatSymbolRefAttr>(
    - Last Modified: Tue Mar 05 08:32:43 UTC 2024
    - 3.6K bytes
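    Results 4 and 5 follow the same shape: read an attribute off the call op, then
    resolve the referenced function through the enclosing module's SymbolTable. A
    small, hedged sketch of that lookup against a generic Operation*; the attribute
    key is a placeholder, since the passes above use their own attribute constants.

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/BuiltinAttributes.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/IR/SymbolTable.h"

      // Resolve the func.func referenced by a FlatSymbolRefAttr on `op`.
      // Returns a null FuncOp if the attribute is absent or unresolved.
      mlir::func::FuncOp LookupCallee(mlir::Operation* op,
                                      llvm::StringRef attr_name) {
        auto ref = op->getAttrOfType<mlir::FlatSymbolRefAttr>(attr_name);
        if (!ref) return {};
        auto module_op = op->getParentOfType<mlir::ModuleOp>();
        if (!module_op) return {};
        mlir::SymbolTable symbol_table(module_op);
        return symbol_table.lookup<mlir::func::FuncOp>(ref.getValue());
      }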
  6. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_cluster_formation.cc

    };
    
    void EncapsulatePartitionedCall(Operation *call_op,
                                    mlir::StringAttr callee_name) {
      OpBuilder builder(call_op);
      auto cluster = builder.create<mlir::tf_device::ClusterOp>(
          call_op->getLoc(), call_op->getResultTypes());
      cluster.getBody().push_back(new Block);
      call_op->replaceAllUsesWith(cluster.getResults());
      call_op->moveBefore(&cluster.GetBody(), cluster.GetBody().end());
    - Last Modified: Tue Dec 19 19:09:44 UTC 2023
    - 6K bytes
  7. tensorflow/compiler/mlir/quantization/tensorflow/passes/add_dump_tensor_op.cc

        TF::PartitionedCallOp call_op, const FlatSymbolRefAttr &f_attr) {
      std::optional<QuantizationUnitLoc::QuantizationUnit> quant_unit =
          FindQuantizationUnitFromLoc(call_op->getLoc());
      return std::make_pair(quant_unit->func_name(), quant_unit->node_name());
    }
    
    std::pair<std::string, std::string> GetFuncNameAndNodeName(
        TF::XlaCallModuleOp call_op, const FlatSymbolRefAttr &f_attr) {
    - Last Modified: Fri Mar 22 22:55:22 UTC 2024
    - 13K bytes
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

    bool IsQuantizedCallforDynamicRange(TF::PartitionedCallOp call_op) {
      bool has_quantized_types_for_weights = false;
      std::unique_ptr<OpQuantSpec> spec = GetTFOpQuantSpec(call_op);
    
      for (int32_t cur_idx = 0; cur_idx < call_op.getArgs().size(); cur_idx++) {
        // Check if only the weight index has QuantizeCastOp.
        auto cur_op = dyn_cast_or_null<quantfork::QuantizeCastOp>(
            call_op.getArgs()[cur_idx].getDefiningOp());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
  9. tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc

        if (!call_op.getF().getRootReference().getValue().starts_with(
                kNestedModule))
          return WalkResult::advance();
        // This is a call we need to inline!
        LLVM_DEBUG(llvm::dbgs()
                   << "Found call to inline: " << *call_op.getOperation() << "\n");
    
        auto call_interface = cast<CallOpInterface>(call_op.getOperation());
        auto called_func =
    - Last Modified: Tue Dec 19 08:06:04 UTC 2023
    - 4K bytes
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tpu_model_to_cpu.cc

      using OpRewritePattern<TF::TPUPartitionedCallOp>::OpRewritePattern;
    
     private:
      LogicalResult matchAndRewrite(TF::TPUPartitionedCallOp call_op,
                                    PatternRewriter& rewriter) const override {
        auto f_attr = mlir::dyn_cast<FlatSymbolRefAttr>(call_op.getFAttr());
        auto module_op = call_op->getParentOfType<ModuleOp>();
        SymbolTable symbol_table(module_op);
    
        auto f_name = f_attr.getValue();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.5K bytes
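    Result 10 is cut off mid-class; the overall shape of such a rewrite is the
    standard OpRewritePattern skeleton. Here is a hedged sketch of that skeleton
    written against the upstream func.call op rather than TF::TPUPartitionedCallOp;
    the pattern name and the no-op body are placeholders.

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/IR/PatternMatch.h"
      #include "mlir/IR/SymbolTable.h"

      namespace {
      // Skeleton only: matches func.call, resolves the callee, and bails out.
      struct ExampleCallPattern
          : public mlir::OpRewritePattern<mlir::func::CallOp> {
        using mlir::OpRewritePattern<mlir::func::CallOp>::OpRewritePattern;

        mlir::LogicalResult matchAndRewrite(
            mlir::func::CallOp call_op,
            mlir::PatternRewriter& rewriter) const override {
          auto module_op = call_op->getParentOfType<mlir::ModuleOp>();
          mlir::SymbolTable symbol_table(module_op);
          auto callee =
              symbol_table.lookup<mlir::func::FuncOp>(call_op.getCallee());
          if (!callee)
            return rewriter.notifyMatchFailure(call_op, "callee not found");
          // A real pattern would mutate the IR through `rewriter` here.
          return mlir::failure();
        }
      };
      }  // namespace

    Patterns of this shape are normally collected into a RewritePatternSet and run
    with a greedy driver such as applyPatternsAndFoldGreedily, which is presumably
    how the pass above applies its rewrite as well.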