Results 51 - 60 of 184 for "created" (0.11 sec)

  1. tensorflow/compiler/mlir/quantization/tensorflow/cc/convert_asset_args_test.cc

      ConvertAssetArgsTest() {
        ctx_.loadDialect<func::FuncDialect, TF::TensorFlowDialect,
                         tf_saved_model::TensorFlowSavedModelDialect>();
      }
    
      // Parses `module_op_str` to create a `ModuleOp`. Checks whether the created
      // module op is valid.
      OwningOpRef<ModuleOp> ParseModuleOpString(
          const absl::string_view module_op_str) {
        auto module_op_ref = parseSourceString<ModuleOp>(module_op_str, &ctx_);
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 6.2K bytes
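
    The fixture above loads the required dialects into the MLIRContext before parsing. A minimal standalone sketch of the same parse-then-verify pattern (the helper name and dialect set here are illustrative, not taken from the test):

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/BuiltinOps.h"
      #include "mlir/IR/MLIRContext.h"
      #include "mlir/IR/OwningOpRef.h"
      #include "mlir/Parser/Parser.h"

      // Parses a textual module; the returned OwningOpRef is null on failure.
      mlir::OwningOpRef<mlir::ModuleOp> ParseModule(llvm::StringRef ir,
                                                    mlir::MLIRContext& ctx) {
        ctx.loadDialect<mlir::func::FuncDialect>();  // dialects must be loaded before parsing
        return mlir::parseSourceString<mlir::ModuleOp>(ir, &ctx);
      }
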
  2. tensorflow/compiler/mlir/lite/experimental/common/outline_operations.cc

      FunctionType function_type =
          builder.getFunctionType(input_types, return_types);
    
      std::string function_name = absl::StrCat("func_", subgraph.subgraph_id_);
    
      func::FuncOp new_func = func::FuncOp::create(builder.getUnknownLoc(),
                                                   function_name, function_type);
      new_func.setVisibility(func::FuncOp::Visibility::Private);
      new_func.addEntryBlock();
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 8.5K bytes
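
    A condensed sketch of the outlining step shown above: build a FunctionType, create a private func.func at an unknown location, and give it an entry block. The helper name and the caller-supplied types are placeholders; the created function still has to be inserted into a module by the caller:

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/IR/Builders.h"

      // Creates a detached, empty private function with the given signature.
      mlir::func::FuncOp MakeOutlinedFunc(mlir::OpBuilder& builder,
                                          llvm::StringRef name,
                                          mlir::TypeRange inputs,
                                          mlir::TypeRange results) {
        mlir::FunctionType type = builder.getFunctionType(inputs, results);
        auto func = mlir::func::FuncOp::create(builder.getUnknownLoc(), name, type);
        func.setPrivate();     // same effect as the setVisibility(Private) call above
        func.addEntryBlock();  // entry block arguments mirror `inputs`
        return func;
      }
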
  3. tensorflow/compiler/mlir/tensorflow/transforms/initialize_variables_in_session_init.cc

      auto var_handle_op_in_init = var_handle_op->clone();
      builder.insert(var_handle_op_in_init);
      auto const_op = builder.create<mlir::arith::ConstantOp>(
          session_init_func.getLoc(), tensor_attr.getType(), tensor_attr);
    
      builder.create<TF::AssignVariableOp>(
          session_init_func.getLoc(), llvm::ArrayRef<mlir::Type>{},
          llvm::ArrayRef<mlir::Value>{var_handle_op_in_init->getResult(0),
    - Last Modified: Thu May 23 09:05:47 UTC 2024
    - 7K bytes
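
    The excerpt clones an existing VarHandleOp into the session initializer and then builds the assignment next to it. The clone-and-insert step, reduced to a dialect-free sketch (the function name is illustrative):

      #include "mlir/IR/Builders.h"
      #include "mlir/IR/Operation.h"

      // Deep-copies `op` and places the copy at the builder's current
      // insertion point (e.g. inside the initializer function's body).
      mlir::Operation* CloneIntoInitializer(mlir::Operation* op,
                                            mlir::OpBuilder& builder) {
        mlir::Operation* copy = op->clone();
        builder.insert(copy);
        return copy;
      }
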
  4. tensorflow/compiler/mlir/tensorflow/transforms/tpu_partitioned_op_conversion.cc

      }
    
      if constexpr (is_input) {
        auto pi = builder.create<TF::TPUPartitionedInputV2Op>(
            op.getLoc(), op.getType(), op.getOperands(),
            builder.getI64ArrayAttr(partition_dims), builder.getBoolAttr(false),
            op.get_XlaShardingAttr());
        op->replaceAllUsesWith(pi);
      } else {
        auto po = builder.create<TF::TPUPartitionedOutputV2Op>(
            op.getLoc(), op.getResultTypes(), op.getOperand(),
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 5.1K bytes
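
    The branch above relies on C++17 `if constexpr`: only the arm selected by the template parameter is instantiated, which lets each arm build a different op type. A dialect-free illustration of the same compile-time dispatch (FakeOp and the printed messages are made up):

      #include <iostream>

      struct FakeOp { const char* name; };

      // Only the branch chosen by `is_input` is instantiated.
      template <bool is_input>
      void Convert(const FakeOp& op) {
        if constexpr (is_input) {
          std::cout << "building partitioned input for " << op.name << "\n";
        } else {
          std::cout << "building partitioned output for " << op.name << "\n";
        }
      }

      int main() {
        Convert</*is_input=*/true>(FakeOp{"replicated_input"});
        Convert</*is_input=*/false>(FakeOp{"replicated_output"});
      }
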
  5. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

      } else {
        return false;
      }
      return true;
    }
    
    // Create a dummy zero to be fed locally from the host to the TPUExecute.
    Value CreateZeroInput(Location loc, OpBuilder& builder, Attribute zero_attr,
                          DenseIntElementsAttr shape_attr) {
      ConstOp zero = builder.create<ConstOp>(loc, zero_attr);
      zero->setAttr(kICIWeightDistributionMlirBridgeMarker,
    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 13.9K bytes
  6. tensorflow/compiler/mlir/quantization/stablehlo/cc/config.cc

        config.mutable_calibration_options()
            ->mutable_representative_datasets()
            ->Add(preset_datasets.begin(), preset_datasets.end());
      }
    
      // Create a new `QuantizationSpecs` to replace the existing one. The
      // expansion from `StaticRangePtqPreset` gets populated first and then
      // user-provided explicit `QuantizationSpec`s will be appended.
      QuantizationSpecs new_specs{};
    - Last Modified: Fri May 17 03:36:50 UTC 2024
    - 8.3K bytes
  7. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc

                                        /*allow_partial_conversion=*/false);
    
      auto pass_instrumentors = mlir::GetPassInstrumentors();
      for (const auto& creator : pass_instrumentors) {
        tf2xla.addInstrumentation(creator());
      }
      if (DEBUG_DATA_DUMPER()->ShouldDump(module_name.str(), kDebugGroupMain) ||
          VLOG_IS_ON(1)) {
        tensorflow::DumpMlirOpToFile(
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 45.3K bytes
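
    The loop above instantiates every registered instrumentation factory and attaches the result to the pass pipeline. A minimal sketch of defining and attaching one instrumentation directly (the logger class and function are made up for illustration):

      #include <memory>
      #include "llvm/Support/raw_ostream.h"
      #include "mlir/Pass/Pass.h"
      #include "mlir/Pass/PassInstrumentation.h"
      #include "mlir/Pass/PassManager.h"

      // Logs each pass right before it runs on an operation.
      struct PassLogger : public mlir::PassInstrumentation {
        void runBeforePass(mlir::Pass* pass, mlir::Operation* op) override {
          llvm::errs() << "running pass: " << pass->getName() << "\n";
        }
      };

      void AttachLogger(mlir::PassManager& pm) {
        pm.addInstrumentation(std::make_unique<PassLogger>());
      }
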
  8. tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.cc

        return convert(attr);
      }
      return nullptr;
    }
    
    DenseElementsAttr UniformQuantizedPerAxisValueConverter::convert(
        DenseFPElementsAttr attr) {
      // Creates the converter for each chunk. Normally the size of the
      // quantization dim is 3, so we can cache all the converters.
      ShapedType type = attr.getType();
      std::size_t dim_size = type.getDimSize(quantization_dim_);
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 4.3K bytes
  9. tensorflow/cc/framework/gradient_checker.cc

      for (int i = 0; i < y_num; i++) {
        dy_datas[i] = Tensor(ys[i].type(), y_shapes[i]);
        auto dy_data_flat = dy_datas[i].flat<Y_T>();
        dy_data_flat.setZero();
      }
    
      // Create the feed list.
      ClientSession::FeedType feed_list;
      for (int i = 0; i < x_num; i++) {
        feed_list.insert({xs[i], x_datas[i]});
      }
      for (int i = 0; i < y_num; i++) {
        feed_list.insert({dys[i], dy_datas[i]});
    - Last Modified: Sat Apr 13 05:57:22 UTC 2024
    - 18.2K bytes
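
    The feed list above maps each graph endpoint to a concrete tensor before ClientSession::Run is called. A small self-contained sketch of the same pattern, with a made-up two-node graph standing in for the gradient checker's inputs:

      #include <vector>
      #include "tensorflow/cc/client/client_session.h"
      #include "tensorflow/cc/ops/standard_ops.h"
      #include "tensorflow/core/framework/tensor.h"

      int main() {
        using namespace tensorflow;
        Scope root = Scope::NewRootScope();
        auto x = ops::Placeholder(root, DT_FLOAT);
        auto y = ops::Square(root, x);

        // Build the feed list: one entry per placeholder to be fed.
        Tensor x_val(DT_FLOAT, TensorShape({2}));
        x_val.flat<float>().setValues({3.0f, 4.0f});
        ClientSession::FeedType feed_list;
        feed_list.insert({x, x_val});

        ClientSession session(root);
        std::vector<Tensor> outputs;
        Status s = session.Run(feed_list, {y}, &outputs);
        if (s.ok()) {
          // outputs[0] now holds {9, 16}.
        }
        return 0;
      }
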
  10. tensorflow/compiler/jit/kernels/xla_ops.cc

        compilation_successful.scalar<bool>()() = false;
        ctx->set_output(0, compilation_key);
        ctx->set_output(1, compilation_successful);
        return;
      }
    
      // Each execution of an XlaCompile op creates a new ExecutableClosure, even
      // if it didn't have to compile the cluster because of a compilation-cache
      // hit.  This is because we at least need new snapshots of the resource
      // variables.
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes