- Sort by Score
- Results per page: 10
- Languages All
Results 11 - 20 of 47 for _einsum (0.12 sec)
-
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.td
[], (addBenefit 10)>; // Converts inlined Einsum pattern to TF XlaDotV2 op. def ConvertTFEinsumToXLADotV2Op : Pat< (TF_EinsumOp:$einsum $args, $equation), (CreateXlaDotV2OpFromTfEinsumOp $equation, $args, $einsum), [(IsInt32ElementType $einsum), // Constraint to check: // 1. The einsum has two inputs and one output. // 2. The einsum is not created by the convert function itself.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 21.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_tf_xla_op_to_tf_op.td
def IsPrecisionEmpty : Constraint<CPred<"IsPrecisionEmpty($0)">>; // Creates Einsum Op from XlaDotV2 Op by generating equation. def CreateEinsumOpFromXlaDotV2Op : NativeCodeCall< "CreateEinsumOpFromXlaDotV2Op($_builder, $_loc, $0...)">; // Convert XlaDotV2 Op to Einsum Op with above two functions. def ConvertXlaDotV2OpToEinsumOp : Pat<
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sun Dec 10 05:52:02 UTC 2023 - 2.3K bytes - Viewed (0) -
ci/official/containers/linux_arm64/jax.requirements.txt
# REQUIREMENTS_FILE=jax.requirements.txt setuptools wheel cloudpickle colorama>=0.4.4 matplotlib pillow>=9.1.0 rich absl-py portpicker six opt-einsum auditwheel typing_extensions importlib_metadata>=4.6 numpy==1.26.0;python_version=="3.12" numpy==1.23.4;python_version=="3.11" numpy==1.22.4;python_version<"3.11" scipy==1.11.2;python_version=="3.12"
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jan 30 20:02:17 UTC 2024 - 570 bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/core/util/matmul_bcast.h" namespace mlir { namespace TF { namespace { // Replace TF BatchMatMul by TF Einsum op template <typename BatchMatMulOpType> class ConvertTFBatchMatMulToEinsumOp : public OpRewritePattern<BatchMatMulOpType> { using OpRewritePattern<BatchMatMulOpType>::OpRewritePattern;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 3.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/verify_tfxla_legalization_test.cc
CreateModule(kNonStaticFailure); auto result = Run(); EXPECT_TRUE(result.succeeded()); EXPECT_EQ(static_error.Delta("mhlo.einsum"), 0); EXPECT_EQ(skipped.Delta("mhlo.einsum"), 1); } TEST_F(VerifyTfxlaLegalizationTest, SkipsNonStaticInputsWithBounds) { // Using a string constant here instead of testdata to make this compatible // with open source.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Sep 06 19:12:29 UTC 2023 - 7.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc
if (!value_type.hasRank()) return false; if (!value_type.getElementType().isInteger(integer_width)) return false; return true; } // Constraint to check: // 1. The einsum has two inputs and one output. // 2. The einsum is not created by the convert function itself. // 3. Both inputs are int32 tensor. // 4. Both inputs have the graph ancestor of either const-(sub), or cast-sub.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 47.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h
bool IsInLiftedFunc(Operation* op); // Checks if the op is inside a StableHLO op with region. // If the given op pointer is a nullptr, returns false. bool IsInStableHloOpRegion(Operation* op); // Checks if a given einsum op is supported for XlaDotV2 quantization. bool IsEinsumSupportedByXlaDotV2(StringAttr equation_attr); // Gets the quantization method from `op`. It is retrieved from the // `kQuantizationMethodAttr` string attribute. Returns
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 5.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/tests/verify-tfxla-legalization.mlir
%0 = mhlo.constant dense<(1.000000e+00,-1.000000e+00)> : tensor<128x32x4xcomplex<f32>> %1 = mhlo.constant dense<(1.000000e+00,1.000000e+00)> : tensor<8x64x128xcomplex<f32>> %2 = "mhlo.einsum"(%1, %0) <{einsum_config = "abc,cde->abde"}> : (tensor<8x64x128xcomplex<f32>>, tensor<128x32x4xcomplex<f32>>) -> tensor<8x64x32x4xcomplex<f32>> return %2 : tensor<8x64x32x4xcomplex<f32>> } // -----
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Apr 06 15:32:52 UTC 2024 - 3.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
} if (!is_weight_constant) { if (!function_name.contains("matmul") && !function_name.contains("einsum")) { return absl::InternalError( "Non-constant weights are not supported at the moment," " except matmul and einsum."); } else if (!quant_options_.enable_two_input_tensors() && !is_unitwise_quantization_enabled) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri May 10 04:07:09 UTC 2024 - 16.4K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/ops/tf_op_quant_spec.cc
function_name.contains("and_bias")) { spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias}; } } else if (function_name.contains("einsum")) { spec->coeff_op_quant_dim[1] = -1; if (function_name.contains("with_bias")) { spec->biases_params[2] = {{0, 1}, quant::GetUniformQuantizedTypeForBias}; }
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 6.3K bytes - Viewed (0)