Results 111 - 120 of 126 for getDefiningOp (0.18 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/passes/replace_stablehlo_ops_in_main_function_with_xla_call_module_ops.cc

            should_add_op = false;
            SmallVector<Operation*> all_descendants;
            for (Value v : operands) {
              if (defined_values.contains(v)) continue;
              if (ShouldAddOpToSubgraph(v.getDefiningOp(), reverse_subgraph,
                                        ops_to_add, all_descendants)) {
                should_add_op = true;
                break;
              }
            }
            if (should_add_op) {
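
A note on the pattern in result 1: mlir::Value::getDefiningOp() returns nullptr when the value is a block argument rather than an op result, so helpers such as ShouldAddOpToSubgraph must tolerate a null Operation*. A minimal, self-contained sketch of that null-safe check (the helper name below is hypothetical, not taken from the TensorFlow sources):

    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Hypothetical helper: decide whether the producer of `v` should be
    // pulled into a subgraph being collected. Block arguments have no
    // producer and are never added.
    static bool ProducerBelongsToSubgraph(mlir::Value v) {
      mlir::Operation* def = v.getDefiningOp();
      if (!def) return false;  // `v` is a block argument.
      // ... inspect `def` (dialect, attributes, nested regions) here ...
      return true;
    }
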
  2. tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc

    llvm::SmallVector<Operation*> GetPredecessors(Operation* op) {
      llvm::SmallVector<Operation*> predecessors;
      for (auto operand : op->getOperands()) {
        if (Operation* pred = operand.getDefiningOp()) {
          pred->walk([&](mlir::Operation* opinexecutor) {
            predecessors.push_back(opinexecutor);
          });
        }
      }
      return predecessors;
    }
    
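
Result 2 wraps each defining op in a walk() so that ops nested inside tf_executor islands are also reported as predecessors. For contrast, a simplified sketch that collects only the direct producers (naming is mine, not from the pass):

    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Collect only the immediate defining ops of `op`'s operands; operands
    // that are block arguments contribute nothing.
    llvm::SmallVector<mlir::Operation*> GetDirectPredecessors(mlir::Operation* op) {
      llvm::SmallVector<mlir::Operation*> predecessors;
      for (mlir::Value operand : op->getOperands()) {
        if (mlir::Operation* pred = operand.getDefiningOp())
          predecessors.push_back(pred);
      }
      return predecessors;
    }
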
  3. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

      // Walks up operand 0 and returns the first value whose defining op is not one of Tys.
      template <typename... Tys>
      Value recursivelyWalkUp(Value op) const {
        while (llvm::isa_and_nonnull<Tys...>(op.getDefiningOp())) {
          Operation* producer = op.getDefiningOp();
          op = producer->getOperand(/*idx=*/0);
        }
    
        return op;
      }
    };
    
    class ConvertMaxPoolOp : public OpConversionPattern<mhlo::ReduceWindowOp> {
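
The recursivelyWalkUp helper in result 3 follows operand 0 upward while the producer is one of the listed op types; llvm::isa_and_nonnull also terminates the loop when the current value is a block argument. A free-standing sketch of the same pattern, with a hedged usage example (the op types in the comment are illustrative, not necessarily the instantiation used in legalize_hlo.cc):

    #include "llvm/Support/Casting.h"
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // Walk up through producers of the listed types, always following
    // operand 0, and return the first value produced by something else
    // (or by nothing at all, i.e. a block argument).
    template <typename... Tys>
    mlir::Value RecursivelyWalkUp(mlir::Value value) {
      while (llvm::isa_and_nonnull<Tys...>(value.getDefiningOp())) {
        mlir::Operation* producer = value.getDefiningOp();
        value = producer->getOperand(/*idx=*/0);
      }
      return value;
    }

    // Illustrative use: skip reshape-like wrappers to reach the real input.
    // mlir::Value src = RecursivelyWalkUp<mhlo::ReshapeOp, mhlo::TransposeOp>(v);
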
  4. tensorflow/compiler/mlir/tf2xla/internal/passes/mark_ops_for_outside_compilation.cc

        host_op->walk([&](Operation* op) {
          // Add any operations that provide variant inputs to the cluster.
          for (auto value : op->getOperands()) {
            Operation* input_defining_op = value.getDefiningOp();
            if (IsVariant(value) && input_defining_op &&
                !HasOutsideCompiledAncestor(input_defining_op) &&
                !input_defining_op->hasAttrOfType<StringAttr>(
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

    // Check if only the weight index has a QuantizeCastOp.
        auto cur_op = dyn_cast_or_null<quantfork::QuantizeCastOp>(
            call_op.getArgs()[cur_idx].getDefiningOp());
        if (!cur_op && spec->quantizable_operands.contains(cur_idx)) {
          return false;
        } else if (cur_op) {
          // Check if the QuantizeCastOp has element type of quantized type.
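
Results 5 and 6 use dyn_cast_or_null<OpTy>(value.getDefiningOp()), which folds the null check and the type check into one step: the result is non-null only when the value has a producer of exactly that type. The templated accessor Value::getDefiningOp<OpTy>() is an equivalent shorthand, shown in a small sketch (arith::ConstantOp is just an example op type, and the helper name is mine):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/Value.h"

    // True only when `value` is produced by an arith.constant; block
    // arguments and values produced by other op types yield nullptr.
    bool IsProducedByConstant(mlir::Value value) {
      auto const_op = value.getDefiningOp<mlir::arith::ConstantOp>();
      return const_op != nullptr;
    }
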
  6. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

        // `tf.TPUPartitionedInputV2` op is input to the i-th logical device.
        if (auto partitioned_input =
                llvm::dyn_cast_or_null<mlir::TF::TPUPartitionedInputV2Op>(
                    input_value.getDefiningOp())) {
          if (UnsupportedPartitionedShardingType(input_sharding_type))
            return cluster_func->emitOpError()
                   << "unsupported input sharding type "
  7. tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc

    // Returns a non-null value and can return `value` if backtracking is not
    // possible.
    Value BacktrackAnalysis::BacktrackValue(Value value) {
      while (Operation* op = value.getDefiningOp()) {
        int res_index = mlir::cast<OpResult>(value).getResultNumber();
        if (auto graph = dyn_cast<tf_executor::GraphOp>(op)) {
          value = graph.GetFetch().getOperand(res_index);
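
The BacktrackValue loop in result 7 keeps replacing `value` with the operand that its producer merely forwards (selected by the result number), e.g. the matching fetch operand of a tf_executor.graph. A generic sketch of that loop, with the dialect-specific cases abstracted behind a caller-supplied hook (the hook is my abstraction, not part of resource_alias_analysis.cc):

    #include <functional>
    #include "mlir/IR/Operation.h"
    #include "mlir/IR/Value.h"

    // `forward` maps (pass-through op, result index) to the inner value that
    // result forwards, or a null Value when the op is not pass-through.
    mlir::Value BacktrackValueSketch(
        mlir::Value value,
        const std::function<mlir::Value(mlir::Operation*, unsigned)>& forward) {
      while (mlir::Operation* op = value.getDefiningOp()) {
        unsigned idx = mlir::cast<mlir::OpResult>(value).getResultNumber();
        mlir::Value next = forward(op, idx);
        if (!next) break;  // Producer is not a pass-through op; stop here.
        value = next;
      }
      return value;  // Non-null: at worst the original `value`.
    }
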
  8. tensorflow/compiler/mlir/tensorflow/translate/export_graphdef.cc

      for (Value fetch_operand : fetch.getOperands()) {
        if (mlir::isa<mlir::tf_executor::ControlType>(fetch_operand.getType())) {
          Operation* defining_op =
              GetIslandInnerOpOrSelf(fetch_operand.getDefiningOp());
          auto node_it = nodes_.find(defining_op);
          TF_RET_CHECK(node_it != nodes_.end());
          control_ret_nodes->insert(node_it->second);
        }
      }
      return absl::OkStatus();
    }
    
  9. tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc

      for (Value fetch_operand : fetch.getOperands()) {
        if (mlir::isa<mlir::tf_executor::ControlType>(fetch_operand.getType())) {
          Operation* defining_op =
              GetIslandInnerOpOrSelf(fetch_operand.getDefiningOp());
          auto node_it = nodes_.find(defining_op);
          TF_RET_CHECK(node_it != nodes_.end());
          control_ret_nodes->insert(node_it->second);
        }
      }
      return absl::OkStatus();
    }
    
  10. tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc

    // returns an error.
    LogicalResult GetConstShapeValue(Value shape_value,
                                     llvm::SmallVector<int64_t, 8>* shape) {
      auto shape_op = shape_value.getDefiningOp();
      if (!shape_op) return failure();
      auto shape_const_op = llvm::dyn_cast<TF::ConstOp>(shape_op);
      if (!shape_const_op) return failure();
      for (const auto& v : shape_const_op.getValue().getValues<APInt>()) {
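
Result 10 reads a constant shape by checking that the value's defining op is a TF::ConstOp and then iterating its elements. The same idea written against the upstream MLIR matcher API instead of the TF dialect, as a sketch (the helper name is mine):

    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/BuiltinAttributes.h"
    #include "mlir/IR/Matchers.h"
    #include "mlir/IR/Value.h"
    #include "mlir/Support/LogicalResult.h"

    // Extract an integer shape from `shape_value` if (and only if) it is
    // produced by a constant op; otherwise fail, as in the excerpt above.
    mlir::LogicalResult GetConstShape(mlir::Value shape_value,
                                      llvm::SmallVector<int64_t, 8>* shape) {
      mlir::DenseIntElementsAttr attr;
      if (!mlir::matchPattern(shape_value, mlir::m_Constant(&attr)))
        return mlir::failure();
      for (const llvm::APInt& v : attr.getValues<llvm::APInt>())
        shape->push_back(v.getSExtValue());
      return mlir::success();
    }
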