- Sort by: Score
- Results per page: 10
- Languages All
Results 21 - 30 of 303 for Cast (0.1 sec)
-
tensorflow/compiler/mlir/lite/tests/prepare-tf.mlir
// CHECK-LABEL: add_v2_uint32 // CHECK: %[[CAST:.*]] = "tf.Cast"(%arg0) <{Truncate = false}> : (tensor<ui32>) -> tensor<i32> // CHECK: %[[CAST1:.*]] = "tf.Cast"(%arg1) <{Truncate = false}> : (tensor<ui32>) -> tensor<i32> // CHECK: %[[ADD:.*]] = "tf.AddV2"(%[[CAST]], %[[CAST1]]) : (tensor<i32>, tensor<i32>) -> tensor<i32> // CHECK: %[[CAST2:.*]] = "tf.Cast"(%[[ADD]]) <{Truncate = false}> : (tensor<i32>) -> tensor<ui32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed May 29 07:26:59 UTC 2024 - 59.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
dtypes.string, shape=(None,), name='input_vocabs' ) # Introduce a matmul op that takes the lookup values to observe the # effects of quantization. lookup_vals = math_ops.cast( table.lookup(input_vocabs_placeholder), dtypes.float32 ) # shape: (2, ?) matmul_input = array_ops_stack.stack([lookup_vals, lookup_vals]) # Create a dummy weight matrix filled with ones.
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
Output a = ops::Placeholder(root.WithOpName("test/a"), DT_INT64); Output b = ops::Placeholder(root.WithOpName("test/b"), DT_INT64); Output cast_a = ops::Cast(root.WithOpName("test/cast_a"), a, DT_INT32); Output cast_b = ops::Cast(root.WithOpName("test/cast_b"), b, DT_INT32); Output tensor_list_reserve = ops::TensorListReserve( root.WithOpName("test/tensor_list_reserve"), cast_a, cast_b, DT_FLOAT);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Feb 14 10:11:10 UTC 2024 - 79.6K bytes - Viewed (0) -
platforms/core-configuration/model-core/src/main/java/org/gradle/internal/instantiation/generator/AbstractClassGenerator.java
import org.gradle.api.provider.SupportsConvention; import org.gradle.api.reflect.InjectionPointQualifier; import org.gradle.api.tasks.Nested; import org.gradle.cache.internal.CrossBuildInMemoryCache; import org.gradle.internal.Cast; import org.gradle.internal.extensibility.NoConventionMapping; import org.gradle.internal.instantiation.ClassGenerationException; import org.gradle.internal.instantiation.InjectAnnotationHandler;
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon May 06 21:54:37 UTC 2024 - 63K bytes - Viewed (0) -
testing/performance/src/templates/native-dependents-resources/googleTest/libs/googleTest/1.7.0/include/gtest/internal/gtest-port.h
template<typename To> inline To ImplicitCast_(To x) { return x; } // When you upcast (that is, cast a pointer from type Foo to type // SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts // always succeed. When you downcast (that is, cast a pointer from // type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Thu Apr 04 07:21:38 UTC 2024 - 67.2K bytes - Viewed (0) -
platforms/documentation/docs/src/snippets/native-binaries/google-test/groovy/libs/googleTest/1.7.0/include/gtest/internal/gtest-port.h
template<typename To> inline To ImplicitCast_(To x) { return x; } // When you upcast (that is, cast a pointer from type Foo to type // SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts // always succeed. When you downcast (that is, cast a pointer from // type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
Registered: Wed Jun 12 18:38:38 UTC 2024 - Last Modified: Mon Nov 27 17:53:42 UTC 2023 - 67.2K bytes - Viewed (0) -
guava/src/com/google/common/collect/ImmutableSortedMap.java
// even though K doesn't explicitly implement Comparable. comparator = (Comparator<? super K>) NATURAL_ORDER; } if (map instanceof ImmutableSortedMap) { // TODO(kevinb): Prove that this cast is safe, even though // Collections.unmodifiableSortedMap requires the same key type. @SuppressWarnings("unchecked") ImmutableSortedMap<K, V> kvMap = (ImmutableSortedMap<K, V>) map;
Registered: Wed Jun 12 16:38:11 UTC 2024 - Last Modified: Thu Feb 22 21:19:52 UTC 2024 - 50.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
%val = "tf.TensorArrayReadV3"(%ta#0, %index, %ta#1) : (tensor<!tf_type.resource<tensor<*xf32>>>, tensor<i32>, tensor<f32>) -> tensor<*xf32> // CHECK: %[[CAST:.*]] = tensor.cast %[[ELEM]] : tensor<3xf32> to tensor<*xf32> // CHECK: return %[[CAST]] : tensor<*xf32> func.return %val : tensor<*xf32> } // ----- // Test CaseRegion with gradient inside PartitionedCall Op. The gradient local
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 49K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
// 2) Any resource stores except the last one. // TODO(ycao): Store-load forwarding implemented here is only correct when // computation is purely sequential (no concurrency). Need to support concurrent // computation as well. void ForwardStoreToLoad(Block* block) { // resource_handle_to_last_store_op keeps track of the most recent (last) // store to each resource. Non-existent entry indicates that a resource has
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Apr 25 16:01:03 UTC 2024 - 55.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tfr/python/tfr_gen.py
rhs, rhs_ty = self.visit(right) if isinstance(op, ast.Eq): pred = 'eq' elif isinstance(op, ast.Lt): pred = 'ult' elif isinstance(op, ast.LtE): pred = 'ule' elif isinstance(op, ast.Gt): pred = 'ugt' elif isinstance(op, ast.GtE): pred = 'uge' elif isinstance(op, ast.NotEq): pred = 'ne' else:
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 27 15:27:03 UTC 2022 - 55.8K bytes - Viewed (0)