Results 31 - 40 of 58 for "created" (0.1 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/passes/propagate_quantize_type.cc

        if (failed(applyPatternsAndFoldGreedily(func, frozen_patterns))) {
          func.emitError() << "quant-propagate-quantize-type failed.";
          signalPassFailure();
        }
      }
    }
    
    }  // namespace
    
    // Creates an instance of the TensorFlow dialect PropagateQuantizeType pass.
    std::unique_ptr<OperationPass<ModuleOp>> CreatePropagateQuantizeTypePass() {
      return std::make_unique<PropagateQuantizeType>();
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 7K bytes
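    The excerpt above shows the standard tail of an MLIR pattern-rewrite pass: run the pattern set through the greedy driver, signal pass failure if the driver fails, and expose a Create...Pass() factory. Below is a minimal sketch of that shape; the pass name ExamplePass, its empty pattern set, and the chosen headers are illustrative assumptions, not code from the TensorFlow tree.

        #include <memory>
        #include <utility>

        #include "mlir/IR/BuiltinOps.h"
        #include "mlir/IR/PatternMatch.h"
        #include "mlir/Pass/Pass.h"
        #include "mlir/Transforms/GreedyPatternRewriteDriver.h"

        namespace {

        // Skeleton pass: greedily applies a (here, empty) set of rewrite
        // patterns to the enclosing module.
        class ExamplePass
            : public mlir::PassWrapper<ExamplePass,
                                       mlir::OperationPass<mlir::ModuleOp>> {
         public:
          llvm::StringRef getArgument() const override { return "example-pass"; }

          void runOnOperation() override {
            mlir::RewritePatternSet patterns(&getContext());
            // A real pass would populate `patterns` here.
            if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
                    getOperation(), std::move(patterns)))) {
              getOperation().emitError() << "example-pass failed.";
              signalPassFailure();
            }
          }
        };

        }  // namespace

        // Factory in the same style as CreatePropagateQuantizeTypePass().
        std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateExamplePass() {
          return std::make_unique<ExamplePass>();
        }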
  2. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

        if (constant.getType().getRank() != 2) return failure();
    
        // Create a tfl.transpose op that performs ZX transpose on `input`.
        auto create_z_x_transpose_op = [&](Value input) -> Value {
          RankedTensorType input_type =
              mlir::cast<RankedTensorType>(input.getType());
          const int input_rank = input_type.getRank();
    
          // Create a 1D I32 tensor for representing the dimension permutation.
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
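    The create_z_x_transpose_op lambda goes on to build that 1-D I32 permutation tensor; in this pass the ZX transpose amounts to swapping the last two dimensions of the input. A plain-C++ sketch of just the permutation follows; the helper name is mine, and the swap-the-last-two-axes reading is an assumption drawn from the comment in the excerpt.

        #include <cstdint>
        #include <numeric>
        #include <utility>
        #include <vector>

        // Identity permutation with the last two axes swapped,
        // e.g. rank 4 -> {0, 1, 3, 2}. Assumes rank >= 2.
        std::vector<int32_t> ZXTransposePermutation(int rank) {
          std::vector<int32_t> perm(rank);
          std::iota(perm.begin(), perm.end(), 0);
          std::swap(perm[rank - 2], perm[rank - 1]);
          return perm;
        }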
  3. tensorflow/compiler/mlir/quantization/stablehlo/passes/nchw_convolution_to_nhwc.cc

        Value input = op->getOperand(0);
        const TensorType new_input_tensor_type = GetTransposedTensorType(
            mlir::cast<TensorType>(input.getType()), kNchwToNhwcPermutation);
    
        auto input_transpose_op = rewriter.create<mlir::stablehlo::TransposeOp>(
            op.getLoc(), /*resultType0=*/new_input_tensor_type, /*operand=*/input,
            rewriter.getDenseI64ArrayAttr(kNchwToNhwcPermutation));
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.2K bytes
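    Here the rewriter wraps the convolution input in a stablehlo.transpose driven by kNchwToNhwcPermutation. The sketch below shows how such a permutation maps a static shape; the value {0, 2, 3, 1} is the conventional NCHW-to-NHWC permutation and is stated as an assumption rather than copied from the file.

        #include <array>
        #include <cstdint>
        #include <vector>

        // NCHW -> NHWC: output dimension i is taken from input dimension perm[i].
        constexpr std::array<int64_t, 4> kNchwToNhwcPermutation = {0, 2, 3, 1};

        // Shape of the transposed result, e.g. {1, 8, 32, 32} -> {1, 32, 32, 8}.
        std::vector<int64_t> TransposedShape(const std::vector<int64_t>& shape) {
          std::vector<int64_t> transposed;
          transposed.reserve(kNchwToNhwcPermutation.size());
          for (int64_t dim : kNchwToNhwcPermutation) {
            transposed.push_back(shape[dim]);
          }
          return transposed;
        }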
  4. tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc

          if (QuantizedType::getQuantizedElementType(operand.getType())) {
            auto newTy = QuantizedType::castToExpressedType(operand.getType());
            newOperands.push_back(
                rewriter.create<TFL::DequantizeOp>(loc, newTy, operand));
            continue;
          }
    
          newOperands.push_back(operand);
        }
    
        SmallVector<Type> newResultTys;
        for (auto result : op->getResults()) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.8K bytes
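    The loop above routes every quantized operand through a tfl.dequantize whose result type is the operand's expressed (typically float) type. A small sketch of that type query, using only the two QuantizedType helpers visible in the excerpt; the header path differs between MLIR versions and is an assumption.

        #include "mlir/Dialect/Quant/QuantTypes.h"  // newer MLIR: mlir/Dialect/Quant/IR/QuantTypes.h
        #include "mlir/IR/Types.h"

        // If `type` carries a quantized element type, return the corresponding
        // expressed type (what a dequantize would produce); otherwise return
        // `type` unchanged.
        mlir::Type ExpressedTypeOrSelf(mlir::Type type) {
          if (mlir::quant::QuantizedType::getQuantizedElementType(type)) {
            return mlir::quant::QuantizedType::castToExpressedType(type);
          }
          return type;
        }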
  5. tensorflow/compiler/mlir/tensorflow/transforms/lower_globals_to_ml_program.cc

        if (globalTensor.getValue()) {
          initial_value = *globalTensor.getValue();
        } else {
          initial_value = mlir::Attribute();
        }
        opToName[globalTensor] = name;
        auto variableOp = globalBuilder.create<ml_program::GlobalOp>(
            globalTensor.getLoc(), name, globalTensor.getType(),
            globalTensor.getIsMutable(), initial_value,
            /*visibility=*/globalBuilder.getStringAttr("private"));
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.6K bytes
  6. tensorflow/compiler/mlir/lite/debug/debug_test.cc

              return registry;
            }()) {
        context_.loadAllAvailableDialects();
    
        mlir::OpBuilder builder(&context_);
        module_ = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
    
        builder.setInsertionPointToStart(module_->getBody());
        auto func = builder.create<mlir::func::FuncOp>(  //
            builder.getUnknownLoc(), "main", builder.getFunctionType({}, {}));
        func->setAttr("tfl.func", builder.getUnitAttr());
    - Last Modified: Wed Apr 17 11:15:16 UTC 2024
    - 9.7K bytes
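    The fixture above assembles an in-memory module with a single main function for the debug tests. A condensed sketch of the same construction follows; the free-standing helper, the explicit dialect loading, and the entry block with func.return (added so the module verifies) are my additions, not part of the test.

        #include "mlir/Dialect/Func/IR/FuncOps.h"
        #include "mlir/IR/Builders.h"
        #include "mlir/IR/BuiltinOps.h"
        #include "mlir/IR/MLIRContext.h"
        #include "mlir/IR/OwningOpRef.h"
        #include "mlir/IR/Verifier.h"

        mlir::OwningOpRef<mlir::ModuleOp> BuildMainOnlyModule(
            mlir::MLIRContext& context) {
          context.loadDialect<mlir::func::FuncDialect>();
          mlir::OpBuilder builder(&context);

          mlir::OwningOpRef<mlir::ModuleOp> module =
              builder.create<mlir::ModuleOp>(builder.getUnknownLoc());

          // func @main() { return }, mirroring the fixture's empty entry point.
          builder.setInsertionPointToStart(module->getBody());
          auto func = builder.create<mlir::func::FuncOp>(
              builder.getUnknownLoc(), "main", builder.getFunctionType({}, {}));
          builder.createBlock(&func.getBody());
          builder.create<mlir::func::ReturnOp>(builder.getUnknownLoc());

          // Sanity check: the assembled module should verify cleanly.
          if (mlir::failed(mlir::verify(*module))) {
            module->emitError("generated module failed verification");
          }
          return module;
        }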
  7. tensorflow/compiler/mlir/lite/stablehlo/odml_converter/folders.cc

    namespace {
    
    // Helper class for parsing operands to a foldable operation.
    class FoldAdaptor {
     public:
      // Returns std::nullopt if the operation cannot be folded.
      static std::optional<FoldAdaptor> Create(Operation* operation) {
        auto foldable_opr = [](Value val) -> bool {
          return !llvm::isa<BlockArgument>(val) &&
                 llvm::isa<stablehlo::ConstantOp>(val.getDefiningOp());
        };
    - Last Modified: Wed May 08 06:11:55 UTC 2024
    - 4.5K bytes
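    FoldAdaptor::Create only accepts operands whose defining op is a stablehlo::ConstantOp; the explicit BlockArgument check matters because getDefiningOp() is null for block arguments. For comparison, here is a sketch of the same test written against MLIR's generic constant matcher, a deliberately broader API choice than the file's since it accepts any constant-foldable defining op.

        #include "mlir/IR/Attributes.h"
        #include "mlir/IR/Matchers.h"
        #include "mlir/IR/Value.h"

        // True if `val` is defined by an op that can be matched as a constant.
        // matchPattern already returns false for block arguments, which have
        // no defining op.
        bool IsConstantOperand(mlir::Value val) {
          mlir::Attribute constant_value;
          return mlir::matchPattern(val, mlir::m_Constant(&constant_value));
        }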
  8. tensorflow/compiler/mlir/lite/stablehlo/transforms/composite_lowering_pass.cc

                                        std::move(patterns)))) {
        getOperation().emitError("Composite lowering pass failed.");
        signalPassFailure();
      }
    }
    
    }  // namespace
    
    // Creates an instance of the pass.
    std::unique_ptr<OperationPass<ModuleOp>> CreateCompositeLoweringPass() {
      return std::make_unique<CompositeLoweringPass>();
    }
    
    // Registers the pass implementation
    - Last Modified: Tue May 28 23:16:05 UTC 2024
    - 3K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_stablehlo_custom_call_to_composite.cc

          return op->emitError("expected exactly one called_computation");
    
        auto decomposition = mlir::cast<FlatSymbolRefAttr>(calledComputations[0]);
    
        auto composite = rewriter.create<mlir::stablehlo::CompositeOp>(
            op.getLoc(), op.getResultTypes(), op.getOperands(), name.str(), attrs,
            decomposition.getValue());
        rewriter.replaceOp(op, composite.getResults());
        return success();
      }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 4.4K bytes
  10. tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc

      auto* ctx = func.getContext();
      patterns.add<PushDownDequantize>(ctx);
      if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
        signalPassFailure();
      }
    }
    }  // namespace
    
    // Creates an instance of the TensorFlow Lite optimize op order pass.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizeOpOrderPass() {
      return std::make_unique<OptimizeOpOrderPass>();
    }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes