Results 11 - 20 of 28 for num_inputs (0.31 sec)

  1. tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc

        FunctionType func_type = func.getFunctionType();
        int num_inputs = func_type.getNumInputs();
        int num_results = func_type.getNumResults();
    
        // For each argument type in function's arguments, change it to unranked
        // tensor type if it's a variant type.
        SmallVector<Type, 8> updated_argument_types;
        updated_argument_types.reserve(num_inputs);
        UpdateTensorListTypes<mlir::OperandRange>(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 70.7K bytes
    - Viewed (0)
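    The snippet above rewrites a function's argument types. As a hedged sketch of the same pattern, assuming only the MLIR builtin FunctionType API (the helper name and the rewrite callback are illustrative, not taken from the file):

        #include <functional>
        #include "llvm/ADT/SmallVector.h"
        #include "mlir/IR/BuiltinTypes.h"

        // Illustrative helper (not from lower_static_tensor_list.cc): rebuild a
        // FunctionType, letting the caller rewrite each argument type. A real pass
        // would map variant-typed arguments to unranked tensor types here.
        mlir::FunctionType RebuildWithArgs(
            mlir::FunctionType func_type,
            const std::function<mlir::Type(mlir::Type)>& rewrite) {
          llvm::SmallVector<mlir::Type, 8> updated_argument_types;
          updated_argument_types.reserve(func_type.getNumInputs());
          for (mlir::Type arg : func_type.getInputs())
            updated_argument_types.push_back(rewrite(arg));
          return mlir::FunctionType::get(func_type.getContext(),
                                         updated_argument_types,
                                         func_type.getResults());
        }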
  2. tensorflow/c/eager/c_api.cc

    }
    
    void TFE_OpAddInputList(TFE_Op* op, TFE_TensorHandle** inputs, int num_inputs,
                            TF_Status* status) {
      status->status = tensorflow::unwrap(op)->AddInputList(
          {reinterpret_cast<tensorflow::AbstractTensorHandle**>(
               tensorflow::unwrap(inputs)),
           static_cast<size_t>(num_inputs)});
    }
    
    extern int TFE_OpGetFlatInputCount(const TFE_Op* op, TF_Status* status) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 08:11:23 UTC 2024
    - 44K bytes
    - Viewed (0)
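    For context on how TFE_OpAddInputList is meant to be called, here is a minimal sketch assuming a live TFE_Context and two valid tensor handles; the op name "IdentityN" and the helper function are illustrative only:

        #include "tensorflow/c/eager/c_api.h"
        #include "tensorflow/c/tf_status.h"

        // Sketch only: hand a variadic input list to an eager op in one call.
        void AddPairAsInputList(TFE_Context* ctx, TFE_TensorHandle* h0,
                                TFE_TensorHandle* h1, TF_Status* status) {
          TFE_Op* op = TFE_NewOp(ctx, "IdentityN", status);  // op name illustrative
          if (TF_GetCode(status) != TF_OK) return;
          TFE_TensorHandle* inputs[] = {h0, h1};
          TFE_OpAddInputList(op, inputs, /*num_inputs=*/2, status);
          TFE_DeleteOp(op);
        }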
  3. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc

    // TODO(ycao): Support computation with compile-time constant, which requires
    // non-trivial input mapping as implemented now.
    void GetInputMappingForMlir(int num_inputs, std::vector<int>* input_mapping) {
      input_mapping->resize(num_inputs, 0);
      std::iota(input_mapping->begin(), input_mapping->end(), 0);
    }
    
    static void RegisterDialects(mlir::DialectRegistry& registry) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 45.3K bytes
    - Viewed (0)
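    The identity mapping built by GetInputMappingForMlir is easy to see with a small standalone example; for num_inputs = 4 the resize-then-iota sequence produces {0, 1, 2, 3}:

        #include <numeric>
        #include <vector>

        int main() {
          std::vector<int> input_mapping;
          input_mapping.resize(4, 0);                                // {0, 0, 0, 0}
          std::iota(input_mapping.begin(), input_mapping.end(), 0);  // {0, 1, 2, 3}
          // MLIR argument i maps to XLA parameter i: an identity mapping.
          return 0;
        }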
  4. tensorflow/c/kernels_experimental.cc

      }
    }
    
    bool TF_IsRefInput(TF_OpKernelContext* ctx, int i, TF_Status* status) {
      auto* cc_ctx = reinterpret_cast<::tensorflow::OpKernelContext*>(ctx);
      if (i < 0 || i >= cc_ctx->num_inputs()) {
        TF_SetStatus(status, TF_OUT_OF_RANGE, "input index out of range");
        return false;
      }
      TF_SetStatus(status, TF_OK, "");
      return cc_ctx->input_is_ref(i);
    }
    
    #ifndef IS_MOBILE_PLATFORM
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 23 06:12:29 UTC 2024
    - 30.9K bytes
    - Viewed (0)
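    A hedged usage sketch for TF_IsRefInput from a plugin kernel, assuming the TF_NumInputs accessor from the kernels C API and a caller-allocated TF_Status; the helper name is made up for illustration:

        #include "tensorflow/c/kernels.h"
        #include "tensorflow/c/kernels_experimental.h"
        #include "tensorflow/c/tf_status.h"

        // Illustrative only: count how many of a kernel's inputs are ref-typed.
        int CountRefInputs(TF_OpKernelContext* ctx) {
          TF_Status* status = TF_NewStatus();
          int count = 0;
          for (int i = 0; i < TF_NumInputs(ctx); ++i) {
            if (TF_IsRefInput(ctx, i, status) && TF_GetCode(status) == TF_OK) ++count;
          }
          TF_DeleteStatus(status);
          return count;
        }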
  5. tensorflow/compiler/jit/kernels/xla_ops.cc

        : OpKernel(ctx), platform_info_(XlaPlatformInfoFromDevice(ctx->device())) {}
    
    void XlaRunOp::Compute(OpKernelContext* ctx) {
      VLOG(3) << "XlaRunOp " << def().name();
      Tensor key_tensor = ctx->input(ctx->num_inputs() - 1);
    
      bool use_pjrt =
          GetXlaOpsCommonFlags()
              ->tf_xla_use_device_api.IsEnabledInXlaCompileAndRunForDevice(
                  platform_info_.device_type());
    
      if (use_pjrt) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 41.4K bytes
    - Viewed (0)
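    The snippet above relies on the convention that the compilation key is appended as the op's last input. Purely as an illustration of that layout (not code from xla_ops.cc), splitting an OpKernelContext's inputs into the real arguments and the trailing key might look like this:

        #include <vector>
        #include "tensorflow/core/framework/op_kernel.h"
        #include "tensorflow/core/framework/tensor.h"

        // Illustrative only: inputs are laid out as [arg0, ..., argN-2, key].
        void SplitArgsAndKey(tensorflow::OpKernelContext* ctx,
                             std::vector<tensorflow::Tensor>* args,
                             tensorflow::Tensor* key) {
          const int n = ctx->num_inputs();
          for (int i = 0; i < n - 1; ++i) args->push_back(ctx->input(i));
          *key = ctx->input(n - 1);  // last input carries the compilation key
        }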
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

        mlir::FunctionType callee_type = callee.getFunctionType();
        SmallVector<Type, 4> expected_callee_input_types;
        auto num_inputs = op.getInputs().size() / 2;
        for (unsigned i = 0; i < num_inputs; ++i) {
          auto input_type =
              mlir::dyn_cast<RankedTensorType>(op.getOperand(i).getType());
          auto scalar = RankedTensorType::get({}, input_type.getElementType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
    - Viewed (0)
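    The division by two in the snippet above (op.getInputs().size() / 2) suggests the operands arrive in two matched halves, for example values followed by their corresponding init values. A generic, purely illustrative split of such a flat list:

        #include <cassert>
        #include <cstddef>
        #include <utility>
        #include <vector>

        // Sketch only: a flat list [v0, ..., v{n-1}, init0, ..., init{n-1}]
        // split back into its two halves.
        template <typename T>
        std::pair<std::vector<T>, std::vector<T>> SplitPairedOperands(
            const std::vector<T>& flat) {
          assert(flat.size() % 2 == 0);
          const std::size_t num_inputs = flat.size() / 2;
          std::vector<T> values(flat.begin(), flat.begin() + num_inputs);
          std::vector<T> inits(flat.begin() + num_inputs, flat.end());
          return {values, inits};
        }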
  7. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc

                                            ? mhlo::Precision::DEFAULT
                                            : mlir::mhlo::Precision::HIGHEST;
      llvm::SmallVector<mlir::Attribute, 2> attr_vec;
      const int num_inputs = 2;
      for (int i = 0; i < num_inputs; i++) {
        attr_vec.push_back(
            mlir::mhlo::PrecisionAttr::get(builder->getContext(), precision));
      }
      return builder->getArrayAttr(attr_vec);
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 291.8K bytes
    - Viewed (0)
  8. tensorflow/c/experimental/ops/gen/cpp/views/op_view.cc

    const std::vector<AttrView>& OpView::Attributes() const {
      return argument_attrs_;
    }
    
    const std::vector<OpArgumentView>& OpView::AllArguments() const {
      return all_arguments_;
    }
    
    int OpView::NumInputs() const { return input_args_.size(); }
    
    int OpView::NumOutputs() const { return output_args_.size(); }
    
    ArgView OpView::OnlyInput() const {
      CHECK_EQ(input_args_.size(), 1);  // Crash OK
      return input_args_.front();
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Jun 03 07:02:00 UTC 2024
    - 3.2K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/translate/import_model.cc

          }
        }
    
        return mlir::UnrankedTensorType::get(element_type);
      };
    
      // Below we only try and do some shape inference for "source" ops which have
      // no inputs.
      if (node.num_inputs() > 0) return default_type();
    
      // Do some simple inference here to get the function arguments correct for
      // this common case.
      // TODO(jpienaar): Reconsider post refactoring shape functions.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 01 11:17:36 UTC 2024
    - 183.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc

          IsSingleCallRegion(while_region.getBody()), while_arg_matcher);
    
      // All existing inputs to while region are inputs to the functional while.
      auto new_inputs = llvm::to_vector<4>(while_region.getOperands());
    
      // All existing results will also be generated by the functional while.
      auto new_result_types = llvm::to_vector<4>(while_region.getResultTypes());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 28.7K bytes
    - Viewed (0)