Results 31 - 40 of 213 for se_shape (0.21 sec)

  1. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test_base.py

              contracting_dims.add(c)
          x_signature = [
              None if c not in contracting_dims else x_shape[cidx]
              for cidx, c in enumerate(x_labels)
          ]
          y_signature = [
              None if c not in contracting_dims else y_shape[cidx]
              for cidx, c in enumerate(y_labels)
          ]
        return x_shape, y_shape, bias_shape, x_signature, y_signature
    
      def _create_einsum_model(
          self,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 18.2K bytes
    - Viewed (0)
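
    The excerpt above builds the einsum model's input signatures by keeping a static size only for contracting dimensions and marking every other dimension as None (dynamic). A minimal standalone sketch of that pattern, with a hypothetical helper name, labels, and shapes not taken from the test file:

      def build_signature(labels, shape, contracting_dims):
        # Keep the concrete size only for contracting dimensions; all other
        # dimensions become None, i.e. dynamic in the input signature.
        return [
            shape[idx] if label in contracting_dims else None
            for idx, label in enumerate(labels)
        ]

      # "ab,bc->ac": the shared label "b" is the contracting dimension.
      x_signature = build_signature("ab", (2, 3), {"b"})   # [None, 3]
      y_signature = build_signature("bc", (3, 4), {"b"})   # [3, None]
      print(x_signature, y_signature)
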
  2. tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc

              // TensorArrayScatter `value`.
              auto t = scatter.getValue().getType().dyn_cast<RankedTensorType>();
              if (!t || t.getShape().empty()) return std::nullopt;
              return RankedTensorType::get(t.getShape().drop_front(),
                                           t.getElementType());
            } else if (auto gather =
                           llvm::dyn_cast<TF::TensorArrayGatherV3Op>(user)) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 02 20:41:19 UTC 2023
    - 40.2K bytes
    - Viewed (0)
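
    The pass above infers a TensorArray's element type from the value handed to TensorArrayScatter by dropping the leading (index) dimension of its shape. A rough Python illustration of that shape relationship using tf.TensorArray (assumes a TensorFlow install; the shapes are made up):

      import tensorflow as tf

      # The value passed to scatter has shape [num_indices] + element_shape,
      # so the element shape is the value shape with the leading dim dropped.
      value = tf.zeros([3, 2, 4])            # scatter 3 elements of shape [2, 4]
      element_shape = value.shape[1:]        # TensorShape([2, 4])

      ta = tf.TensorArray(tf.float32, size=3, element_shape=element_shape)
      ta = ta.scatter(indices=[0, 1, 2], value=value)
      print(ta.read(0).shape)                # (2, 4)
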
  3. tensorflow/cc/gradients/math_grad.cc

      auto x_shape = Shape(scope, x);
      auto output_shape = Shape(scope, op.output(0));
    
      // Reduce away broadcasted leading dims.
      auto reduce_x = internal::BroadcastGradientArgs(scope, x_shape, output_shape);
      auto gx_sum =
          ReduceSum(scope, gx, /*axis=*/reduce_x.r0, ReduceSum::KeepDims(true));
      auto gx_sum_reshape = Reshape(scope, gx_sum, x_shape);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 25 18:20:20 UTC 2023
    - 50.7K bytes
    - Viewed (0)
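
    The gradient excerpt reduces a broadcasted gradient back to the shape of x by summing over the broadcast axes and reshaping. A small NumPy sketch of the same idea with a hypothetical helper and shapes (the C++ code instead uses BroadcastGradientArgs to compute the reduction axes):

      import numpy as np

      def reduce_broadcast_grad(grad, x_shape):
        # Sum away the leading axes that broadcasting added, then the axes
        # where x had size 1, and reshape back to x's original shape.
        extra = grad.ndim - len(x_shape)
        grad = grad.sum(axis=tuple(range(extra)))
        axes = tuple(i for i, d in enumerate(x_shape) if d == 1)
        grad = grad.sum(axis=axes, keepdims=True)
        return grad.reshape(x_shape)

      x_shape = (1, 3)
      gx = np.ones((2, 4, 3))                            # gradient in broadcast shape
      print(reduce_broadcast_grad(gx, x_shape).shape)    # (1, 3)
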
  4. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h

          auto scatter_dims_to_operand_dims =
              scatter_dimension_numbers.getScatterDimsToOperandDims();
    
          if (IsIotaAttr(inserted_window_dims, indices_type.getShape().back()) &&
              IsIotaAttr(scatter_dims_to_operand_dims,
                         indices_type.getShape().back())) {
            rewriter.replaceOpWithNewOp<TfOp>(scatter_op,
                                              scatter_op.getResult(0).getType(),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.1K bytes
    - Viewed (0)
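
    The rewrite above only fires when the scatter dimension attributes form an iota sequence 0, 1, ..., n-1 of the expected length. A trivial Python stand-in for that check (hypothetical helper, not the IsIotaAttr implementation):

      def is_iota(dims, expected_len):
        # True when dims is exactly [0, 1, ..., expected_len - 1].
        return list(dims) == list(range(expected_len))

      print(is_iota([0, 1, 2], 3))   # True
      print(is_iota([0, 2, 1], 3))   # False
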
  5. tensorflow/compiler/mlir/tensorflow/tests/tf_to_hlo_pipeline/sccp-post-shape-inference.mlir

        %2 = "tf.PartitionedCall"(%1) {config = "", config_proto = "", executor_type = "", f = @get_shape} : (tensor<?x?xf32>) -> (tensor<?xi64>)
    
        // CHECK: return %[[RESULT]]
        func.return %2 : tensor<?xi64>
      }
    
      // CHECK-LABEL: func @get_shape
      func.func @get_shape(%arg0 : tensor<*xi64>) -> tensor<?xi64> {
        %0 = "tf.Shape"(%arg0) : (tensor<*xi64>) -> tensor<?xi64>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jul 25 02:54:34 UTC 2023
    - 1020 bytes
    - Viewed (0)
  6. tensorflow/compiler/jit/partially_decluster_pass.cc

    // a TensorFlow graph.
    //
    // Abstractly, if we have a cluster of this form:
    //
    //   x0 = arg0
    //   x1 = arg1
    //     ...
    //   shape = f(x0, x1, ...)
    //   result = Reshape(input=<something>, new_shape=shape)
    //
    // then pulling `f` out of the cluster may reduce the number of compilations and
    // will never increase the number of compilations.
    //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 11:36:41 UTC 2024
    - 15.7K bytes
    - Viewed (0)
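
    One reading of the comment's argument: the cluster is recompiled once per distinct value of whatever must be constant at compile time, and because `f` is deterministic there are never more distinct shapes than distinct inputs to `f`. A toy Python model of a compilation cache illustrating that counting argument (purely illustrative, not the pass's actual mechanics):

      # Toy model: each distinct cache key triggers one "compilation".
      def count_compilations(keys):
        return len(set(keys))

      def f(args):                       # stands in for the shape computation
        return tuple(sorted(args))       # any deterministic function of its inputs

      inputs = [(2, 3), (3, 2), (4, 4), (2, 3)]

      keyed_on_inputs = count_compilations(inputs)                 # f inside the cluster
      keyed_on_shapes = count_compilations(f(a) for a in inputs)   # f pulled out
      assert keyed_on_shapes <= keyed_on_inputs
      print(keyed_on_inputs, keyed_on_shapes)                      # 3 2
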
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo.cc

          return failure();
    
        //
        // Transpose and reshape the input and kernel
        //
    
        // Reshape input image to add a new spatial dimension.
        auto image_type = mlir::cast<ShapedType>(conv_op.getLhs().getType());
        SmallVector<int64_t, 4> image_2d_shape(image_type.getShape().begin(),
                                               image_type.getShape().end());
        image_2d_shape.push_back(1);
        auto image_2d_type =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 154.9K bytes
    - Viewed (0)
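
    The conversion above turns a 1-D convolution input into a 2-D one by appending a trailing spatial dimension of size 1 before the transpose step the excerpt mentions. A NumPy sketch of that reshape alone (hypothetical layout and sizes):

      import numpy as np

      image = np.zeros((1, 16, 3))                  # [batch, width, channels]
      image_2d_shape = list(image.shape) + [1]      # append a unit spatial dim
      image_2d = image.reshape(image_2d_shape)
      print(image_2d.shape)                         # (1, 16, 3, 1)
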
  8. tensorflow/compiler/mlir/tf2xla/internal/passes/extract_outside_compilation.cc

      ArrayRef<int64_t> in_shape = ranked_type.getShape();
      if (in_shape.empty() || in_shape[0] < 0) {
        return context_op->emitOpError()
               << "A map_outside_compilation op's input and output shapes must "
                  "have rank at least one and the first dimension must be known.";
      }
      int64_t split_size = in_shape[0] / num_cores_per_replica;
      if (in_shape[0] % num_cores_per_replica != 0) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Apr 30 21:25:12 UTC 2024
    - 68.3K bytes
    - Viewed (0)
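
    The check above requires the first dimension to be known and evenly divisible by num_cores_per_replica before computing the per-core split size. A small Python sketch of the same validation (hypothetical function name and values):

      def split_first_dim(in_shape, num_cores_per_replica):
        # First dimension must be known (non-negative) and divide evenly.
        if not in_shape or in_shape[0] < 0:
          raise ValueError("rank must be >= 1 and the first dimension known")
        if in_shape[0] % num_cores_per_replica != 0:
          raise ValueError("first dimension must be divisible by num_cores_per_replica")
        return in_shape[0] // num_cores_per_replica

      print(split_first_dim([8, 128], 4))   # 2
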
  9. tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc

      auto element_type = tensorflow::GetTypeFromTFTensorShape(
          buffer_type.getShape().drop_front(), buffer_type.getElementType());
      auto reshape = builder.create<TF::ReshapeOp>(
          loc, ArrayRef<Type>{element_type},
          ArrayRef<Value>{slice,
                          GetR1Const(element_type.getShape(), builder, loc)});
      return reshape.getOutput();
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 14.5K bytes
    - Viewed (0)
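
    The utility above reshapes a size-1 slice of a buffer down to a single element whose type drops the buffer's leading dimension. A NumPy sketch of that shape manipulation (hypothetical buffer shape):

      import numpy as np

      buffer = np.arange(24).reshape(4, 2, 3)    # buffer of 4 elements of shape (2, 3)
      slice_ = buffer[1:2]                       # slicing keeps the leading dim: (1, 2, 3)
      element_shape = buffer.shape[1:]           # drop the leading dim: (2, 3)
      element = slice_.reshape(element_shape)
      print(element.shape)                       # (2, 3)
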
  10. tensorflow/cc/gradients/linalg_grad_test.cc

      TensorShape y_shape({3, 3, 2});
      RunTest({x}, {x_shape}, {y}, {y_shape});
    }
    
    TEST_F(LinalgGradTest, Einsum_MatMul) {
      TensorShape x_shape({2, 3});
      TensorShape y_shape({3, 3});
      Output x = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(x_shape));
      Output y = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(y_shape));
      auto z = Einsum(scope_, {x, y}, "ij,jk->ik");
      TensorShape z_shape({2, 3});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Mar 07 23:11:54 UTC 2022
    - 5.8K bytes
    - Viewed (0)
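
    The test above builds a matmul-style einsum and checks its gradient; with x of shape (2, 3) and y of shape (3, 3), "ij,jk->ik" yields a (2, 3) result, matching z_shape. A quick Python check of the same contraction and its gradient shapes (assumes TensorFlow; mirrors the shapes in the test, not its exact harness):

      import tensorflow as tf

      x = tf.random.normal([2, 3])
      y = tf.random.normal([3, 3])
      with tf.GradientTape() as tape:
        tape.watch(x)
        tape.watch(y)
        z = tf.einsum("ij,jk->ik", x, y)
      dx, dy = tape.gradient(z, [x, y])
      print(z.shape, dx.shape, dy.shape)   # (2, 3) (2, 3) (3, 3)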