Results 11 - 20 of 67 for call_op (0.12 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc

        if (method.ok() && method->has_static_range_ptq()) return true;
      }
    
      TF::PartitionedCallOp call_op = dyn_cast_or_null<TF::PartitionedCallOp>(op);
      return call_op && call_op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) &&
             call_op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue() ==
                 llvm::StringRef(
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 14.8K bytes
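An illustrative sketch (not part of the indexed file): the excerpt above shows the recurring pattern of narrowing a generic mlir::Operation* to TF::PartitionedCallOp and comparing one of its string attributes against an expected value. The helper below restates that pattern; its name and parameters are hypothetical.

    // Hypothetical helper illustrating the attribute check from result 1.
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Casting.h"
    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Operation.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Returns true if `op` is a TF::PartitionedCallOp carrying string attribute
    // `attr_name` whose value equals `expected`. Tolerates a null `op`.
    bool CallOpHasAttrValue(mlir::Operation* op, llvm::StringRef attr_name,
                            llvm::StringRef expected) {
      auto call_op = llvm::dyn_cast_or_null<mlir::TF::PartitionedCallOp>(op);
      if (!call_op) return false;
      auto attr = call_op->getAttrOfType<mlir::StringAttr>(attr_name);
      return attr && attr.getValue() == expected;
    }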
  2. tensorflow/compiler/mlir/quantization/common/lift_as_function_call_test.cc

      ASSERT_TRUE(module_op);
    
      func::FuncOp main_fn = FindMainFuncOp(*module_op);
      ASSERT_THAT(main_fn, NotNull());
    
      auto call_op = *main_fn.getOps<TF::XlaCallModuleOp>().begin();
      EXPECT_TRUE(HasWeightOnlyPtqMethod(call_op));
    }
    
    TEST_F(LiftAsFunctionCallTest, HasWeightOnlyPtqMethodDifferentMethod) {
      const absl::string_view kModuleDotNoQuantization = R"mlir(
        module {
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 26.2K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_hashtable_ops_as_args.cc

      for (auto& function_use : function_uses.value()) {
        auto call_op = function_use.getUser();
        auto caller_func = call_op->getParentOfType<func::FuncOp>();
        if (!caller_func) return failure();
    
        builder.setInsertionPoint(call_op);
        for (auto [lifted_op, arg_idx] : lifted_op_and_arg_idx) {
          auto new_op = builder.clone(*lifted_op, mapping);
          call_op->insertOperands(arg_idx, new_op->getResult(0));
        }
    
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 8.2K bytes
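An illustrative sketch (not part of the indexed file): the loop above clones each lifted op immediately before its call site and feeds the clone's result in as a new call operand. The wrapper below restates that single step with hypothetical names.

    // Hypothetical wrapper around the clone-and-insert step from result 3.
    #include "mlir/IR/Builders.h"
    #include "mlir/IR/IRMapping.h"
    #include "mlir/IR/Operation.h"

    // Clones `lifted_op` directly in front of `call_op`, remapping its operands
    // through `mapping`, and inserts the clone's first result as the call
    // operand at position `arg_idx`.
    void CloneLiftedOpIntoCall(mlir::OpBuilder& builder, mlir::IRMapping& mapping,
                               mlir::Operation& lifted_op, mlir::Operation* call_op,
                               unsigned arg_idx) {
      builder.setInsertionPoint(call_op);
      mlir::Operation* new_op = builder.clone(lifted_op, mapping);
      call_op->insertOperands(arg_idx, new_op->getResult(0));
    }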
  4. tensorflow/compiler/mlir/quantization/common/lift_as_function_call.cc

      return method.has_weight_only_ptq();
    }
    
    bool IsWeightOnlyQuantizableOp(const Operation& op) {
      if (auto call_op = dyn_cast<TF::XlaCallModuleOp>(op)) {
        StringRef entry_function_name = GetEntryFunctionName(call_op);
        absl::StatusOr<Method> quantization_method = GetQuantizationMethod(call_op);
        return ContainsConvOrDot(entry_function_name) && quantization_method.ok() &&
               quantization_method->has_weight_only_ptq();
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 21.8K bytes
  5. tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc

        }
      }
    }
    
    }  // namespace
    
    std::unique_ptr<OpQuantSpec> GetStableHloOpQuantSpec(Operation* op) {
      auto spec = std::make_unique<OpQuantSpec>();
      if (auto call_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op)) {
        auto entry_function =
            call_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
        StringRef function_name = entry_function.getValue();
        if (!function_name.starts_with("composite_")) {
          return spec;
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 7.8K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

    // Checks if an op calls a composite function and all the inputs and outputs are
    // quantized.
    bool IsQuantizedCompositeFunction(func::CallOp call_op) {
      if (!call_op.getCallee().starts_with("quantized_")) {
        return false;
      }
    
      bool has_quantized_types = false;
      for (Value operand : call_op.getOperands()) {
        if (const TensorType type = mlir::dyn_cast<TensorType>(operand.getType())) {
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
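An illustrative sketch (not part of the indexed file): a simplified variant of the check above that reports whether any operand or result of a func.call is a tensor with a quantized element type. The real IsQuantizedCompositeFunction additionally requires the "quantized_" callee prefix; the helper name here is hypothetical and the quant dialect include path follows the mid-2024 MLIR layout.

    // Hypothetical, simplified variant of the quantized-type check in result 6.
    #include "llvm/ADT/STLExtras.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Dialect/Quant/QuantTypes.h"  // Path may differ across MLIR versions.
    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Value.h"

    // Returns true if at least one operand or result of `call_op` is a tensor
    // whose element type is a quantized type.
    bool HasQuantizedTensorOperandOrResult(mlir::func::CallOp call_op) {
      auto is_quantized_tensor = [](mlir::Value value) {
        auto type = mlir::dyn_cast<mlir::TensorType>(value.getType());
        return type &&
               mlir::isa<mlir::quant::QuantizedType>(type.getElementType());
      };
      return llvm::any_of(call_op.getOperands(), is_quantized_tensor) ||
             llvm::any_of(call_op.getResults(), is_quantized_tensor);
    }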
  7. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h

    // quantization.
    inline FlatSymbolRefAttr GetFuncAttr(TF::PartitionedCallOp call_op) {
      return mlir::dyn_cast<FlatSymbolRefAttr>(call_op.getFAttr());
    }
    
    inline FlatSymbolRefAttr GetFuncAttr(TF::XlaCallModuleOp call_op) {
      return call_op->getAttrOfType<FlatSymbolRefAttr>(
          TF::kStablehloEntryFunctionAttrName);
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.9K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc

          }
          if (failed(HandleCaseOrIfOp(case_op, branch_functions))) return failure();
        } else if (auto call_op = llvm::dyn_cast<TF::PartitionedCallOp>(&op)) {
          auto callee = call_op.func();
          if (!callee) {
            return call_op.emitOpError(
                "resource lifting does not support call with nested references.");
          }
          if (failed(HandlePartitionedCallOp(call_op, callee, module,
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 55.1K bytes
  9. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc

      func::FuncOp main_fn = FindMainFuncOp(*module_op);
      ASSERT_THAT(main_fn, NotNull());
    
      Operation* call_op = FindOperationOfType<TF::XlaCallModuleOp>(main_fn);
      EXPECT_FALSE(IsHybridQuantizedOp(call_op));
    }
    
    constexpr absl::string_view kModuleDotGeneralFullyConnected = R"mlir(
      module {
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 22.9K bytes
  10. tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc

        if (!main_func) continue;
    
        SymbolTable symbol_table(module_op);
        for (auto call_op : main_func.getOps<TF::PartitionedCallOp>()) {
          func_ops.push_back(dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(
              mlir::cast<FlatSymbolRefAttr>(call_op.getFAttr()).getValue())));
        }
        for (auto call_op : main_func.getOps<TF::StatefulPartitionedCallOp>()) {
          func_ops.push_back(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 21K bytes
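An illustrative sketch (not part of the indexed file): results 7 and 10 together show how a TF::PartitionedCallOp is tied back to the func.func it calls, by reading the flat symbol reference from its "f" attribute and resolving it through the module's SymbolTable. The helper name below is hypothetical.

    // Hypothetical helper combining the attribute access from result 7 with the
    // symbol-table lookup from result 10.
    #include "llvm/Support/Casting.h"
    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/SymbolTable.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"

    // Resolves the callee of `call_op` via `symbol_table`; yields a null FuncOp
    // if the "f" attribute is not a flat symbol ref or the lookup fails.
    mlir::func::FuncOp ResolveCallee(mlir::SymbolTable& symbol_table,
                                     mlir::TF::PartitionedCallOp call_op) {
      auto sym = mlir::dyn_cast<mlir::FlatSymbolRefAttr>(call_op.getFAttr());
      return llvm::dyn_cast_or_null<mlir::func::FuncOp>(
          sym ? symbol_table.lookup(sym.getValue()) : nullptr);
    }

Constructing the SymbolTable once per module, as result 10 does, avoids repeating a linear symbol scan for every call site.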