Results 1 - 10 of 90 for getOperation (0.51 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/remove_unused_arguments.cc

        if (!op.isPrivate()) return;
    
        auto call = llvm::dyn_cast<CallableOpInterface>(op.getOperation());
        if (!call) return;
        Region* region = call.getCallableRegion();
        if (!region) return;  // happens e.g. for external functions
    
        auto func = llvm::dyn_cast<FunctionOpInterface>(op.getOperation());
        if (!func || do_not_touch.count(func)) return;
        llvm::BitVector unused_args(func.getNumArguments());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.9K bytes
  2. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference_pass.cc

          input_shapes_ = input_shapes_vec;
        }
    
        auto failure_or_converged = InferModuleShape(
            getOperation(), max_iterations_, /*ops_to_skip=*/{}, input_shapes_);
        if (failed(failure_or_converged)) return signalPassFailure();
        if (!failure_or_converged.value()) {
          getOperation().emitError()
              << "shape inference pass did not reach convergence after "
              << max_iterations_;
    - Last Modified: Wed Apr 24 12:49:45 UTC 2024
    - 3K bytes
  3. tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc

      func::FuncOp callee = from.lookup<func::FuncOp>(symbol_ref.getValue());
      callee.getOperation()->getBlock()->getOperations().remove(
          callee.getOperation());
      to.insert(callee);
    }
    
    void TPUBridgeExecutorIslandOutlining::runOnOperation() {
      MLIRContext *ctx = &getContext();
    
      SymbolTable symbol_table(getOperation());
      if (Operation *nested_module = symbol_table.lookup(kNestedModule)) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.7K bytes
  4. tensorflow/compiler/mlir/tensorflow/transforms/launch_to_device_attribute.cc

      // Move all inner ops of the launch to the block containing the launch.
      auto body = launch.GetBody().without_terminator();
      Operation* launch_op = launch.getOperation();
      launch_op->getBlock()->getOperations().splice(
          launch_op->getIterator(), launch.GetBody().getOperations(), body.begin(),
          body.end());
    
      launch.erase();
    
      return success();
    }
    
    void LaunchToDeviceAttributePass::runOnOperation() {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.5K bytes
  5. tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc

      int64_t total_size_transferred = 0;
      for (auto input : to_graph.getOperands()) {
        Operation* input_op = input.getDefiningOp();
        if (input_op && input_op == from_graph.getOperation()) {
          auto input_type =
              mlir::dyn_cast_or_null<RankedTensorType>(input.getType());
          if (input_type == nullptr || !input_type.hasStaticShape()) continue;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7.3K bytes
  6. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

     public:
      using OpRewritePattern<SrcOp>::OpRewritePattern;
    
      LogicalResult matchAndRewrite(SrcOp srcop,
                                    PatternRewriter &rewriter) const final {
        Operation *op = srcop.getOperation();
        bool allTypesFp = true;
        bool allTypesQuantizedOrInt = true;
        for (auto operand : op->getOperands()) {
          ShapedType type = mlir::dyn_cast<ShapedType>(operand.getType());
          if (!type) continue;
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
  7. tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc

        }
        // Use the min/max from the operands and the num_bits and narrow_range
        // attribute to create the quantization parameter for the new quantize op.
        rewriter.setInsertionPointAfter(tf_op.getOperation());
        IntegerAttr num_bits = rewriter.getI64IntegerAttr(tf_op.getNumBits());
        BoolAttr narrow_range = rewriter.getBoolAttr(tf_op.getNarrowRange());
        Type res_type = tf_op.getType();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.1K bytes
  8. tensorflow/compiler/mlir/tensorflow/transforms/tpu_partitioned_op_conversion.cc

      llvm::SmallVector<TF::TPUReplicateMetadataOp, 4> metadata;
      getOperation()->walk(
          [&metadata](TF::TPUReplicateMetadataOp op) { metadata.push_back(op); });
    
      IntegerAttr num_cores_per_replica;
      if (metadata.size() == 1) {
        num_cores_per_replica = metadata.front().getNumCoresPerReplicaAttr();
      }
    
      auto result = getOperation()->walk([&num_cores_per_replica](Operation* op) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_pass.cc

      target.addLegalDialect<TFL::TensorFlowLiteDialect>();
      target.addLegalDialect<arith::ArithDialect>();
    
      if (failed(applyPartialConversion(getOperation(), target,
                                        std::move(patterns)))) {
        getOperation().emitError("Composite lowering pass failed.");
        signalPassFailure();
      }
    }
    
    }  // namespace
    
    // Creates an instance of the pass.
    - Last Modified: Tue May 28 23:16:05 UTC 2024
    - 3K bytes
  10. tensorflow/compiler/mlir/tensorflow/transforms/extract_tpu_copy_with_dynamic_shape_op.cc

                            .getOperation();
      tpu_copy_with_dynamic_shape_op->moveBefore(return_op);
      return success();
    }
    
    // Update all the usage of tf_device.return op with launch op result.
    void UpdateReturnOpResultWithLaunchOpResult(tf_device::LaunchOp* launch_op) {
      auto operand_not_in_launch = [&](OpOperand& operand) {
        return !launch_op->getOperation()->isProperAncestor(operand.getOwner());
      };
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes