- Sort: Score
- Results per page: 10
- Languages: All
Results 1 - 7 of 7 for input_tensor (0.3 sec)
-
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
@def_function.function def matmul(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: """Performs a matrix multiplication. Args: input_tensor: Input tensor to matmul with the filter. Returns: A 'output' -> output tensor mapping """ out = math_ops.matmul(input_tensor, random_tensor_gen_fn((2, 3)))
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
tensor_spec.TensorSpec( shape=[None], dtype=input_type, name='input_tensor' ) ] ) def model(self, input_tensor: core.Tensor) -> Mapping[str, core.Tensor]: """Performs a gather and a 2D convolution operation. Args: input_tensor: Input tensor to perform operation on. Returns:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc
ASSERT_THAT(weights_zero_points, SizeIs(out_channel_size)); ASSERT_THAT(input_tensor->quantization->scale, SizeIs(1)); ASSERT_THAT(output_tensor->quantization->scale, SizeIs(1)); const float eps = 1e-7; // Bias scale should be input * per_channel_weight_scale. for (size_t i = 0; i < out_channel_size; i++) { EXPECT_THAT(bias_scales[i], FloatNear(input_tensor->quantization->scale[0] *
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Jun 12 23:15:24 UTC 2024 - 73.9K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/flatbuffer_import.cc
// Get or construct MLIR values for each input for (int i = 0, e = func_inputs.size(); i < e; i++) { auto input_tensor = func_inputs[i]; const auto& tensor = *subgraph.tensors.at(input_tensor); auto loc = TensorLoc(tensor, builder, base_loc); if (vals_map[input_tensor]) { auto err = errors::FailedPrecondition("duplicate input arguments"); return emitError(loc, err.ToString()), err; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 21 18:21:50 UTC 2024 - 66.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir
} // ----- module attributes {tf_saved_model.semantics} {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 81K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir
} // TODO(b/308773062): Add whole_model unit-test // ----- module { func.func @matmul2_with_int_per_layer(%arg0: tensor<2x2xf32> {tf_saved_model.index_path = ["input_tensor"]}) -> tensor<2x2xf32> { %cst = "tf.Const"() {device = "", value = dense<[[-0.630731344, 0.54962182], [0.180364341, -0.764542698]]> : tensor<2x2xf32>} : () -> tensor<2x2xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Nov 06 01:23:21 UTC 2023 - 80.5K bytes - Viewed (0) -
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
} } } struct OutputInputTensorPairHasher { uint64 operator()(std::pair<OutputTensor, InputTensor> const& s) const { return Hash64Combine(OutputTensor::Hash()(s.first), InputTensor::Hash()(s.second)); } }; // TODO(phawkins) add a canonical copy of these operator names and refactor // everything to use it.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Feb 22 08:47:20 UTC 2024 - 51K bytes - Viewed (0)