- Sort Score
- Results per page: 10
- Languages All
Results 101 - 107 of 107 for 64xf32 (1.22 sec)
-
tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td
// (i.e., after converting reference types to their corresponding TensorFlow or // standard types). Also, this allows compatible types so it is legal to have // tensor<*xf32> and tensor<4xf32> types. def TF_SameOperandsAndResultTypeResolveRef : TraitList< InferTensorType.traits # [ NativeOpTrait<"TF::SameOperandsAndResultTypeResolveRef"> ]>;
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 30.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/lite/stablehlo/tests/uniform-quantized-stablehlo-to-tfl.mlir
// `tfl.reshape`. func.func @dynamic_reshape_float(%arg0: tensor<?x3xf32>, %arg1: tensor<2xi32>) -> tensor<?x?xf32> { %0 = "stablehlo.dynamic_reshape"(%arg0, %arg1) : (tensor<?x3xf32>, tensor<2xi32>) -> tensor<?x?xf32> return %0 : tensor<?x?xf32> } // CHECK-LABEL: func @dynamic_reshape_float // CHECK: stablehlo.dynamic_reshape // CHECK-NOT: tfl.reshape
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 17:10:32 UTC 2024 - 106.2K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h
// Casts the `target` type to a quantized type by using the quantization // parameters from the type in the `source` type attribute. // Examples: // f32 -> !quant.uniform<i8:f32, 1.0> // tensor<4xf32> -> tensor<4x!quant.uniform<i8:f32, 1.0>> // The result is wrapped by a type attribute. Returns nullptr if the cast // isn't valid. // // `axis` is to specify the quantization dimension in the `target` and only
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Apr 24 20:30:06 UTC 2024 - 41.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
// CHECK: %[[VAL:.*]] = "tf.Const"() <{value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00]> : tensor<3xf32>}> : () -> tensor<3xf32> %value = "tf.Const"() {value = dense<[1.0, 2.0, 3.0]> : tensor<3xf32>} : () -> tensor<3xf32> // CHECK: %[[READ_VAR:.*]] = "tf.ReadVariableOp"(%[[VAR]]) // CHECK: %[[UPDATE_SLICE:.*]] = "tf.Reshape"(%[[VAL]] // CHECK-SAME: -> tensor<1x3xf32>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Oct 30 06:52:55 UTC 2023 - 49K bytes - Viewed (0) -
tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc
/// : (tensor<f32>, tensor<f32>, tensor<f32>) -> tensor<5xf32> /// /// Output would be: /// %iota = "mhlo.iota"() {iota_dimension = 0 : i64} : () -> tensor<5xf32> /// %scaled = "mhlo.multiply"(%iota, %delta) /// {broadcast_dimensions = dense<[]> : tensor<0xi64>} : /// (tensor<5xf32>, tensor<f32>) -> tensor<5xf32> /// %result = "mhlo.add"(%scaled, %offset)
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 20:00:43 UTC 2024 - 291.8K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
} else { // Recurse on the subtypes in the variant/resource. Basically if the input // were: // tensor<!tf_type.variant<tensor<?x8xf32>>> // and: // tensor<!tf_type.variant<tensor<10x8xf32>>> // we'll try here to refine tensor<?x8xf32> with tensor<10x8xf32>. auto refined_subtype = mlir::cast<TensorType>( TypeMeet(lhs_element_type_with_subtype.GetSubtypes().front(),
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Sat Jun 08 07:28:49 UTC 2024 - 134.1K bytes - Viewed (0) -
tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op mlir_module = '''python func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32> return %ret : tensor<10x10xf32> } ''' @tf.function def foo(x, y):
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Jun 11 23:24:08 UTC 2024 - 793K bytes - Viewed (0)