Results 91 - 100 of 1,600 for Auto (0.05 sec)

  1. tensorflow/compiler/mlir/lite/quantization/device_target.cc

      auto in_spec = input_specs[0].dyn_cast<UniformQuantizedType>();
      // TODO(fengliuai): handles the PerAxis QuantizedType.
      auto w_spec = input_specs[1].dyn_cast<UniformQuantizedType>();
      auto b_spec = input_specs[2].dyn_cast<UniformQuantizedType>();
      auto o_spec = out_specs[0].dyn_cast<UniformQuantizedType>();
      if (!in_spec || !w_spec || !b_spec || !o_spec) return failure();
    
      double scale_product = in_spec.getScale() * w_spec.getScale();
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.3K bytes
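    The snippet multiplies the input and weight scales because, in affine quantization,
    the matmul/conv accumulator (and hence the bias) is quantized with scale
    input_scale * weight_scale, which is then requantized to the output scale. A minimal
    standalone sketch of that arithmetic with made-up scale values; nothing below is
    taken from device_target.cc beyond the scale product itself:

      #include <cmath>
      #include <cstdint>
      #include <cstdio>

      int main() {
        // Assumed example scales; real values come from the quantized types.
        const double in_scale = 0.02, w_scale = 0.005, out_scale = 0.1;
        const double scale_product = in_scale * w_scale;      // accumulator/bias scale
        const double multiplier = scale_product / out_scale;  // requantization factor
        // Decompose into a normalized fraction and shift, as fixed-point kernels do.
        int shift = 0;
        const double fraction = std::frexp(multiplier, &shift);
        const auto q31 = static_cast<int32_t>(std::round(fraction * (1ll << 31)));
        std::printf("multiplier=%g q31=%d shift=%d\n", multiplier, q31, shift);
        return 0;
      }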
  2. tensorflow/compiler/mlir/tensorflow/utils/translate_utils.cc

    void PopulateTfVersions(mlir::ModuleOp module, const VersionDef& versions) {
      mlir::Builder b(module.getContext());
      auto producer =
          b.getNamedAttr("producer", b.getI32IntegerAttr(versions.producer()));
      auto min_consumer = b.getNamedAttr(
          "min_consumer", b.getI32IntegerAttr(versions.min_consumer()));
      auto bad_consumers = b.getNamedAttr(
          "bad_consumers",
          b.getI32ArrayAttr(llvm::ArrayRef<int32_t>(
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 3.2K bytes
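    PopulateTfVersions builds NamedAttributes with an mlir::Builder and, in the part of
    the file not shown in this excerpt, presumably attaches them to the module. A minimal
    sketch of that pattern using standard MLIR Builder APIs; the attribute name
    "tf.versions" and the literal version numbers are illustrative assumptions, not
    taken from translate_utils.cc:

      #include "mlir/IR/Builders.h"
      #include "mlir/IR/BuiltinOps.h"

      void AttachVersions(mlir::ModuleOp module) {
        mlir::Builder b(module.getContext());
        auto producer = b.getNamedAttr("producer", b.getI32IntegerAttr(1882));
        auto min_consumer = b.getNamedAttr("min_consumer", b.getI32IntegerAttr(12));
        auto bad_consumers =
            b.getNamedAttr("bad_consumers", b.getI32ArrayAttr({27, 28}));
        // Group the three entries into a dictionary attribute on the module.
        module->setAttr("tf.versions",
                        b.getDictionaryAttr({producer, min_consumer, bad_consumers}));
      }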
  3. tensorflow/compiler/mlir/lite/transforms/push_transpose_through_ewise.cc

        }
    
        if (!tpose_arg->hasOneUse()) {
          return failure();
        }
    
        auto tpose_arg_type =
            llvm::dyn_cast<RankedTensorType>(tpose_arg->getResultTypes()[0]);
        auto cst_arg_type =
            llvm::dyn_cast<RankedTensorType>(cst_arg->getResultTypes()[0]);
    
        auto tpose_arg_rank = tpose_arg_type.getRank();
        auto cst_arg_rank = cst_arg_type.getRank();
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 12.5K bytes
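    The two dyn_casts above yield a null RankedTensorType when a value's type is
    unranked, so calls like getRank() need a non-null guard (the original file may
    perform that check outside this excerpt). A minimal, self-contained sketch of
    the guard; the helper name is invented for illustration:

      #include "mlir/IR/BuiltinTypes.h"
      #include "mlir/IR/Value.h"
      #include "llvm/Support/Casting.h"

      // Returns true only when both values have ranked tensor types of equal rank.
      bool RanksKnownAndEqual(mlir::Value a, mlir::Value b) {
        auto a_ty = llvm::dyn_cast<mlir::RankedTensorType>(a.getType());
        auto b_ty = llvm::dyn_cast<mlir::RankedTensorType>(b.getType());
        return a_ty && b_ty && a_ty.getRank() == b_ty.getRank();
      }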
  4. tensorflow/c/experimental/stream_executor/stream_executor_test.cc

                              SP_Stream stream) -> void {
        auto custom_stream = static_cast<SP_Stream_st*>(stream);
        ASSERT_EQ(custom_stream->stream_id, 14);
        delete custom_stream;
        stream_deleted = true;
      };
    
      StreamExecutor* executor = GetExecutor(0);
      ASSERT_FALSE(stream_created);
      TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
      ASSERT_TRUE(stream_created);
    - Last Modified: Mon May 20 19:54:04 UTC 2024
    - 26.5K bytes
  5. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/dot_general.cc

        flattened_contracting_segids[i] = 0;
      }
      auto seg_prod_result_type =
          RankedTensorType::get(static_cast<int32_t>(1), builder.getI32Type());
      auto out_segids_cst = builder.create<TFL::ConstOp>(
          builder.getI32TensorAttr(flattened_out_segids));
      auto contracting_segids_cst = builder.create<TFL::ConstOp>(
          builder.getI32TensorAttr(flattened_contracting_segids));
      auto num_segids_tensor =
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 19.2K bytes
  6. tensorflow/compiler/mlir/tensorflow/analysis/tf_dataflow.h

                                     ArrayRef<StateT *> results) {
        if (auto cast = dyn_cast<TF::CastOp>(op)) {
          this->join(results[0], *operands[0]);
        } else if (auto while_op = dyn_cast<TF::WhileRegionOp>(op)) {
          for (auto &region : while_op->getRegions()) {
            for (auto [arg, value] :
                 llvm::zip(region.getArguments(), while_op->getOperands())) {
    - Last Modified: Wed Mar 06 23:53:00 UTC 2024
    - 3.9K bytes
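    The WhileRegionOp branch pairs each region block argument with the corresponding
    operand via llvm::zip. A minimal standalone sketch of that iteration pattern over
    two plain containers, unrelated to the dataflow lattice itself:

      #include "llvm/ADT/STLExtras.h"
      #include <cstdio>
      #include <vector>

      int main() {
        std::vector<int> args = {0, 1, 2};
        std::vector<int> operands = {10, 11, 12};
        // llvm::zip stops at the shorter range and yields tuples usable with
        // structured bindings, exactly as in the snippet above.
        for (auto [arg, value] : llvm::zip(args, operands))
          std::printf("block arg %d joined with operand %d\n", arg, value);
        return 0;
      }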
  7. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

           subgraph_idx++) {
        const auto quantized_graph = output_model->subgraphs()->Get(subgraph_idx);
        const auto float_graph = model_->subgraphs()->Get(subgraph_idx);
        ASSERT_EQ(quantized_graph->tensors()->size(),
                  float_graph->tensors()->size());
        std::vector<int> used_tensors;
        for (size_t i = 0; i < quantized_graph->tensors()->size(); i++) {
          const auto quant_tensor = quantized_graph->tensors()->Get(i);
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
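    The test walks the generated flatbuffer accessors (subgraphs(), Get(), tensors(),
    size()) to compare the quantized and float models. A minimal sketch of that
    traversal, assuming only the generated TFLite schema header; the function name is
    invented for illustration:

      #include <cstdint>
      #include "tensorflow/lite/schema/schema_generated.h"

      // True when both models have the same subgraph and per-subgraph tensor counts.
      bool SameTensorCounts(const tflite::Model* a, const tflite::Model* b) {
        if (a->subgraphs()->size() != b->subgraphs()->size()) return false;
        for (uint32_t i = 0; i < a->subgraphs()->size(); ++i) {
          if (a->subgraphs()->Get(i)->tensors()->size() !=
              b->subgraphs()->Get(i)->tensors()->size())
            return false;
        }
        return true;
      }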
  8. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_to_mhlo_int_test.cc

      return %2 : tensor<10x10xi8>
    })mlir";
      TF_ASSERT_OK_AND_ASSIGN(auto input, CreateRandomI8Literal({10, 10}));
      TF_ASSERT_OK_AND_ASSIGN(
          auto input_scale,
          CreateRandomF32Literal({10}, /*min=*/0.0001, /*max=*/2));
      TF_ASSERT_OK_AND_ASSIGN(auto input_zp, CreateRandomI32Literal({10}));
      TF_ASSERT_OK_AND_ASSIGN(
          auto output_scale,
          CreateRandomF32Literal({10}, /*min=*/0.0001, /*max=*/2));
    - Last Modified: Wed Apr 03 01:03:21 UTC 2024
    - 35.8K bytes
  9. tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc

              typename Convert>
    static Attribute BinaryFolder(Op *op) {
      auto lhs_op = op->getLhs().template getDefiningOp<mhlo::ConstantOp>();
      auto rhs_op = op->getRhs().template getDefiningOp<mhlo::ConstantOp>();
      if (!lhs_op || !rhs_op) return {};
    
      auto lhs = dyn_cast_or_null<DenseElementsAttr>(lhs_op.getValue());
      auto rhs = dyn_cast_or_null<DenseElementsAttr>(rhs_op.getValue());
      if (!lhs || !rhs) return {};
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.5K bytes
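    BinaryFolder only fires when both operands come from mhlo.constant ops whose values
    are dense attributes. A minimal sketch of the downstream folding step over two dense
    integer attributes; the elementwise add is an assumption, not the Convert functor
    the pass actually uses:

      #include "mlir/IR/BuiltinAttributes.h"
      #include "llvm/ADT/APInt.h"
      #include "llvm/ADT/STLExtras.h"
      #include "llvm/ADT/SmallVector.h"

      // Elementwise-add two dense integer constants of identical shaped type.
      mlir::Attribute FoldAddConstants(mlir::DenseElementsAttr lhs,
                                       mlir::DenseElementsAttr rhs) {
        if (!lhs || !rhs || lhs.getType() != rhs.getType()) return {};
        llvm::SmallVector<llvm::APInt> sums;
        for (auto [l, r] : llvm::zip(lhs.getValues<llvm::APInt>(),
                                     rhs.getValues<llvm::APInt>()))
          sums.push_back(l + r);
        return mlir::DenseElementsAttr::get(lhs.getType(), sums);
      }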
  10. tensorflow/compiler/mlir/lite/utils/tftext_utils.cc

               << "output(s) when input has rank " << input_type.getRank();
      }
    
      auto value_type = GetResultType(func, 0);
      if (!RankEquals(value_type, 1) ||
          !mlir::isa<StringType>(value_type.getElementType())) {
        return func.emitError() << "1st output should be string tensor";
      }
      if (func.getNumResults() > 1) {
        auto offset_type = GetResultType(func, 1);
        if (!RankEquals(offset_type, 1) ||
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.4K bytes
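    RankEquals is a helper defined elsewhere in tftext_utils.cc; the excerpt only shows
    its call sites. A minimal sketch of what such a rank check typically looks like
    (the actual signature in the file may differ):

      #include "mlir/IR/BuiltinTypes.h"
      #include "llvm/Support/Casting.h"

      // True when the type is a ranked tensor of exactly the requested rank.
      bool RankEquals(mlir::Type type, int64_t rank) {
        auto ranked = llvm::dyn_cast<mlir::RankedTensorType>(type);
        return ranked && ranked.getRank() == rank;
      }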