Search Options

Results per page
Sort
Preferred Languages
Advanced

Results 51 - 60 of 605 for Auto (0.06 sec)

  1. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

      }
    
      auto scalar_type = RankedTensorType::get(/*shape=*/{}, fp_type);
      auto epsilon_tensor_attr = DenseElementsAttr::get(
          scalar_type, {mlir::cast<Attribute>(epsilon_attr)});
      Value epsilon = b.create<mhlo::ConstantOp>(epsilon_tensor_attr);
      auto dims_type = RankedTensorType::get(/*shape=*/{0}, b.getIntegerType(64));
      auto dims = DenseIntElementsAttr::get(dims_type, SmallVector<int64_t, 1>{});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_communication.cc

      if (flatten_tuple) {
        auto operands = llvm::to_vector(values);
        operands.push_back(token);
        return operands;
      }
    
      auto value = values[0];
      // If value with token already exists, reuse it.
      auto it = rewritten_values.find(value);
      if (it != rewritten_values.end()) return {it->getSecond()};
    
      auto create_tuple = [&](ArrayRef<Value> operands) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 40.5K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

    bool CanBeSymmetricallyQuantized(Value weight) {
      auto dq_op = weight.getDefiningOp<quantfork::DequantizeCastOp>();
      if (!dq_op) return true;
    
      auto qtype =
          mlir::cast<TensorType>(dq_op.getArg().getType()).getElementType();
      if (auto uniform_type = llvm::dyn_cast_or_null<UniformQuantizedType>(qtype)) {
        return uniform_type.getZeroPoint() == 0;
      } else if (auto per_axis_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/tf2xla/internal/passes/tpu_sharding_identification_pass.cc

      Block& function_block = func.front();
      for (auto sharding_and_arg :
           llvm::zip(sharding_for_args, function_block.getArguments())) {
        const auto& sharding = std::get<0>(sharding_and_arg);
        BlockArgument arg = std::get<1>(sharding_and_arg);
        if (failed(VerifySharding(arg.getType(), sharding))) return mlir::failure();
      }
      Operation* terminator = function_block.getTerminator();
      for (auto sharding_and_retval :
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 02:01:13 UTC 2024
    - 28.9K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/merge_save_function_ops_to_main.cc

    // exist.
    func::FuncOp GetMainFunction(ModuleOp module_op) {
      const auto main_func_id =
          StringAttr::get(module_op.getContext(), kImportModelDefaultGraphFuncName);
      auto func_ops = module_op.getOps<func::FuncOp>();
      auto main_func_itr = absl::c_find_if(func_ops, [&main_func_id](auto func_op) {
        return func_op.getName() == main_func_id;
      });
    
      if (main_func_itr == func_ops.end()) return {};
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.7K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc

      out = std::get<2>(flattended_labels);
    
      auto lhs_map_or = EquationToMap(lhs);
      if (!lhs_map_or.has_value()) return std::nullopt;
      auto lhs_map = lhs_map_or.value();
    
      auto out_map_or = EquationToMap(out);
      if (!out_map_or.has_value()) return std::nullopt;
      auto out_map = out_map_or.value();
    
      EinsumDimensionNumbers dnums;
      for (int64_t i = 0; i < lhs.size(); ++i) {
        auto out_index = out_map.find(lhs[i]);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 33.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc

      groups_to_only = 0;
      for (const auto& [group, branch] : from) {
        auto to_iter = to.find(group);
        if (to_iter == to.end()) {
          ++groups_from_only;
        } else {
          auto to_branch = to_iter->second;
          if (to_branch == branch) {
            ++groups_same_branch;
          } else {
            ++groups_different_branch;
          }
        }
      }
      for (const auto& [group, _] : to) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 15 09:04:13 UTC 2024
    - 41.2K bytes
    - Viewed (0)
  8. tensorflow/c/kernels.cc

      auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
      int start = -1, stop = -1;
      auto status = cc_ctx->op_kernel().InputRange(name, &start, &stop);
      args->start = start;
      args->stop = stop;
      tensorflow::Set_TF_Status_from_Status(args->status, status);
    }
    
    TF_DataType TF_InputDatatype(TF_OpKernelContext* ctx, int index) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 22:53:47 UTC 2024
    - 36K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/transforms/tpu_validate_inputs.cc

      if (auto repinput = dyn_cast<TF::TPUReplicatedInputOp>(op)) {
        if (!ValidateReplicatedInput(repinput, num_replicas, tpu_replicate_attr))
          return false;
      }
      if (auto repoutput = dyn_cast<TF::TPUReplicatedOutputOp>(op)) {
        if (!ValidateReplicatedOutput(repoutput, num_replicas, tpu_replicate_attr))
          return false;
      }
      if (auto partinput = dyn_cast<TF::TPUPartitionedInputOp>(op)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 07 06:51:01 UTC 2024
    - 21.5K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc

      SmallVector<int64_t, 2> output_shape{1, 2};
      auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
      auto weight_type = RankedTensorType::get(weight_shape, builder->getF32Type());
      auto bias_type = RankedTensorType::get(bias_shape, builder->getF32Type());
      auto projection_type =
          RankedTensorType::get(projection_shape, builder->getF32Type());
      auto layer_norm_scale_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10K bytes
    - Viewed (0)
Back to top