- Sort Score
- Results per page: 10
- Languages All
Results 1 - 7 of 7 for in_shape (0.17 sec)
-
tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc
ArrayRef<int64_t> in_shape = ranked_type.getShape(); if (in_shape.empty() || in_shape[0] < 0) { return context_op->emitOpError() << "A map_outside_compilation op's input and output shapes must " "have rank at least one and the first dimension must be known."; } int64_t split_size = in_shape[0] / num_cores_per_replica; if (in_shape[0] % num_cores_per_replica != 0) {
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue Apr 30 21:25:12 UTC 2024 - 68.3K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py
x_signature = [ None if c not in contracting_dims else x_shape[cidx] for cidx, c in enumerate(x_labels) ] y_signature = [ None if c not in contracting_dims else y_shape[cidx] for cidx, c in enumerate(y_labels) ] return x_shape, y_shape, bias_shape, x_signature, y_signature def _create_einsum_model( self,
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Thu Mar 21 08:51:46 UTC 2024 - 51.2K bytes - Viewed (0) -
tensorflow/cc/gradients/math_grad.cc
auto x_shape = Shape(scope, x); auto output_shape = Shape(scope, op.output(0)); // Reduce away broadcasted leading dims. auto reduce_x = internal::BroadcastGradientArgs(scope, x_shape, output_shape); auto gx_sum = ReduceSum(scope, gx, /*axis=*/reduce_x.r0, ReduceSum::KeepDims(true)); auto gx_sum_reshape = Reshape(scope, gx_sum, x_shape);
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Fri Aug 25 18:20:20 UTC 2023 - 50.7K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/tensorflow/tests/quantize_composit_functions_debugging.mlir
// TF-DAG: %[[arg_quantized:.*]] = "tf.PartitionedCall"(%arg0, %[[in_scale]], %[[in_out_zp]]) <{config = "", config_proto = "", executor_type = "", f = @quantize_i8}>
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Mon Nov 06 01:23:21 UTC 2023 - 80.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py
) def test_einsum_ptq_model( self, equation: str, ): _, y_shape, bias_shape, x_signature, y_signature = ( self._prepare_sample_einsum_datashapes(equation, use_bias=True) ) model = self._create_einsum_model( self._input_saved_model_path, equation, y_shape, x_signature, y_signature, bias_shape, )
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Tue May 14 06:31:57 UTC 2024 - 51.4K bytes - Viewed (0) -
src/cmd/compile/internal/types/type.go
func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 } func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 } func (t *Type) Recur() bool { return t.flags&typeRecur != 0 } func (t *Type) IsShape() bool { return t.flags&typeIsShape != 0 } func (t *Type) HasShape() bool { return t.flags&typeHasShape != 0 } func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) }
Registered: Wed Jun 12 16:32:35 UTC 2024 - Last Modified: Thu Apr 04 14:29:45 UTC 2024 - 49.5K bytes - Viewed (0) -
tensorflow/compiler/mlir/g3doc/_includes/tf_passes.md
} return } ``` ### `-tf-rewrite-tpu-embedding-ops` _Rewrites TPU embedding send/recv ops by adding TPU embedding deduplication data_ ### `-tf-shape-inference` _Shape inference on TF dialect and ops implementing InferTypeOpInterface_ Fixed point shape refinement pass that utilizes the shape functions registered on ops using the InferTypeOpInterface as well as by bridging to
Registered: Sun Jun 16 05:45:23 UTC 2024 - Last Modified: Wed Aug 02 02:26:39 UTC 2023 - 96.4K bytes - Viewed (0)