Results 41 - 50 of 50 for input_tensor (0.3 sec)

  1. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

    // "third_party/tensorflow/compiler/xla/xla_data.pb.h" into
    // "third_party/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc" is
    // resolved
    LogicalResult PrecheckForXlaConvV2Op(XlaConvV2Op op) {
      auto input_tensor = op.getLhs();
      auto kernel_tensor = op.getRhs();
      auto window_strides = op.getWindowStrides();
      auto padding = op.getPadding();
      auto lhs_dilation = op.getLhsDilation();
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/quantization/tensorflow/tests/insert_custom_aggregation_ops.mlir

    // -----
    
    module attributes {tf.versions = {bad_consumers = [], min_consumer = 12 : i32, producer = 1836 : i32}, tf_saved_model.semantics} {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 32.1K bytes
    - Viewed (0)
  3. tensorflow/c/c_api_function.cc

        const TF_Output* inputs, std::vector<OutputTensor>* input_tensors,
        std::unordered_map<const Node*, std::vector<int>>* input_nodes)
        TF_EXCLUSIVE_LOCKS_REQUIRED(fn_body->mu) {
      input_tensors->reserve(ninputs);
      for (int i = 0; i < ninputs; ++i) {
        Node* node = inputs[i].oper ? &inputs[i].oper->node : nullptr;
        int idx = inputs[i].index;
    
        TF_RETURN_WITH_CONTEXT_IF_ERROR(
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Apr 15 03:35:10 UTC 2024
    - 13.6K bytes
    - Viewed (0)
  4. tensorflow/c/c_api_experimental_test.cc

          }
        }
        TF_ShapeAndTypeList* output_shapes;
        TFE_InferShapes(op, input_shapes,
                        input_tensors.empty()
                            ? nullptr
                            : const_cast<TF_Tensor**>(input_tensors.data()),
                        /*input_tensors_as_shapes*/ nullptr,
                        /*input_resource_shapes_and_types*/ nullptr, &output_shapes,
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 17 22:27:52 UTC 2023
    - 13.1K bytes
    - Viewed (0)
  5. tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc

        }
      }
    }
    
    struct OutputInputTensorPairHasher {
      uint64 operator()(std::pair<OutputTensor, InputTensor> const& s) const {
        return Hash64Combine(OutputTensor::Hash()(s.first),
                             InputTensor::Hash()(s.second));
      }
    };
    
    // TODO(phawkins) add a canonical copy of these operator names and refactor
    // everything to use it.
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 51K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/ir/tfl_ops.td

        QuantizableResult,
        Pure]> {
      let summary = "Mean operator";
    
      let description = [{
        Computes the mean of elements across dimensions of a tensor.
        Reduces input_tensor along the dimensions given in axis.
        Unless keepdims is true, the rank of the tensor is reduced by 1 for
        each entry in axis. If keepdims is true, the reduced dimensions are retained
        with length 1.
      }];
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 06 19:09:08 UTC 2024
    - 186K bytes
    - Viewed (0)
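    Note: the Mean operator description in this excerpt matches the semantics of
    tf.reduce_mean. A minimal sketch of the axis/keepdims behaviour it describes,
    assuming the standard TensorFlow Python API (not part of the file excerpted above):

        import tensorflow as tf

        input_tensor = tf.constant([[1.0, 2.0], [3.0, 4.0]])

        # Reducing along axis 1 drops that dimension: result shape (2,) -> [1.5, 3.5].
        mean = tf.reduce_mean(input_tensor, axis=1)

        # With keepdims=True the reduced dimension is retained with length 1:
        # result shape (2, 1) -> [[1.5], [3.5]].
        mean_keepdims = tf.reduce_mean(input_tensor, axis=1, keepdims=True)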
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td

      # 2. invert(a) or a = invert(0)
      input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)
      not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and(
                                          input_tensor, bitwise_ops.invert(input_tensor)),
                                        bitwise_ops.bitwise_or(
                                          input_tensor, bitwise_ops.invert(input_tensor)),
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 23:24:08 UTC 2024
    - 793K bytes
    - Viewed (0)
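    Note: the excerpt above is truncated mid-expression. A self-contained sketch of the
    invert identities it demonstrates, using the public tf.bitwise module in place of the
    excerpt's internal bitwise_ops alias (an assumption, since the excerpt's imports are
    not shown):

        import tensorflow as tf

        dtype = tf.int32
        input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype)

        # a AND NOT(a) is always 0.
        not_a_and_a = tf.bitwise.bitwise_and(input_tensor, tf.bitwise.invert(input_tensor))
        # a OR NOT(a) sets every bit, i.e. -1 for signed integer types.
        not_a_or_a = tf.bitwise.bitwise_or(input_tensor, tf.bitwise.invert(input_tensor))
        # NOT(0) likewise sets every bit.
        not_0 = tf.bitwise.invert(tf.constant(0, dtype=dtype))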
  8. tensorflow/c/c_api_experimental.h

    //     OK to not have the inputs properly set in `op`. See `input_tensors`
    //     if you want shape inference to consider the input tensors of the
    //     op for shape inference.
    //   - The types need not be set in `input_shapes` as it is not used.
    //   - The number of `input_tensors` should be the same as the number of items
    //     in `input_shapes`.
    //
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 27 21:07:00 UTC 2023
    - 15.1K bytes
    - Viewed (0)
  9. tensorflow/c/eager/tape.h

      });
      std::vector<Gradient*> in_grads;
      in_grads.reserve(input_tensors.size());
      for (int target_index = 0; target_index < input_tensors.size();
           ++target_index) {
        const auto current_grad =
            accumulated_gradients_.find(input_tensors[target_index].GetID());
        if (current_grad == accumulated_gradients_.end()) {
          if (IsDtypeTrainable(input_tensors[target_index].GetDType())) {
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 02 12:40:29 UTC 2024
    - 47.2K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h

          for (float factor : tensor_property.derived_scale.factors) {
            scale *= factor;
          }
          spec->biases_params.emplace(
              index,
              std::make_pair(tensor_property.derived_scale.input_tensors,
                             GetUniformQuantizedTypeForBiasWithScale(scale)));
        }
      }
      return spec;
    }
    
    class ConvertSvdfStatsToQDQs : public ConvertOpStatsToQDQs<TFL::SVDFOp> {
     public:
    - Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 28K bytes
    - Viewed (0)