- Sort Score
- Results per page: 10
- Languages All
Results 21 - 24 of 24 for input_tensor (0.36 sec)
-
tensorflow/c/c_api_experimental_test.cc
} } TF_ShapeAndTypeList* output_shapes; TFE_InferShapes(op, input_shapes, input_tensors.empty() ? nullptr : const_cast<TF_Tensor**>(input_tensors.data()), /*input_tensors_as_shapes*/ nullptr, /*input_resource_shapes_and_types*/ nullptr, &output_shapes,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 17 22:27:52 UTC 2023 - 13.1K bytes - Viewed (0) -
tensorflow/c/c_api_experimental.h
// OK to not have the inputs properly set in `op`. See `input_tensors` // if you want shape inference to consider the input tensors of the // op for shape inference. // - The types need not be set in `input_shapes` as it is not used. // - The number of `input_tensors` should be the same as the number of items // in `input_shapes`. //
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 27 21:07:00 UTC 2023 - 15.1K bytes - Viewed (0) -
tensorflow/c/eager/tape.h
}); std::vector<Gradient*> in_grads; in_grads.reserve(input_tensors.size()); for (int target_index = 0; target_index < input_tensors.size(); ++target_index) { const auto current_grad = accumulated_gradients_.find(input_tensors[target_index].GetID()); if (current_grad == accumulated_gradients_.end()) { if (IsDtypeTrainable(input_tensors[target_index].GetDType())) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 02 12:40:29 UTC 2024 - 47.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/transforms/prepare_quantize_helper.h
for (float factor : tensor_property.derived_scale.factors) { scale *= factor; } spec->biases_params.emplace( index, std::make_pair(tensor_property.derived_scale.input_tensors, GetUniformQuantizedTypeForBiasWithScale(scale))); } } return spec; } class ConvertSvdfStatsToQDQs : public ConvertOpStatsToQDQs<TFL::SVDFOp> { public:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 03 18:01:23 UTC 2024 - 28K bytes - Viewed (0)