Results 21 - 30 of 45 for getShape (0.19 sec)

  1. tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc

          std::vector<float> scales_inv;
          std::vector<int32_t> dimension;
          dimension.insert(dimension.end(), new_dense_type.getShape().begin(),
                           new_dense_type.getShape().end());
          std::transform(uniform_type.getScales().begin(),
                         uniform_type.getScales().end(),
                         std::back_inserter(scales_inv),
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed May 08 02:10:16 UTC 2024
    - 43.2K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc

        bool same_element_type = output_tensor_type.getElementType() ==
                                 input_vector_type.getElementType();
        bool same_shape =
            output_tensor_type.getShape() == input_vector_type.getShape();
        if (!same_element_type || !same_shape) {
          op.emitError("input and output should have same shape and element type.");
        }
        return success(same_element_type && same_shape);
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Nov 21 16:55:41 UTC 2023
    - 38.2K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tensorflow/transforms/host_runtime/tpu_metadata_utils.cc

        if (auto ranked_tensor_type =
                mlir::dyn_cast<RankedTensorType>(operand_type)) {
          tensorflow::TensorShapeProto shape_proto;
          ConvertToTensorShapeProto(ranked_tensor_type.getShape(), &shape_proto);
          *arg->mutable_shape() = std::move(shape_proto);
        } else {
          arg->mutable_shape()->set_unknown_rank(true);
        }
    
        if (failed(SetOpSharding(op, input_shardings.getValue()[index],
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.3K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

        // Connect `expand_op` with the input of `stb_op`.
        expand_op.setOperand(0, stb_op.getInput());
        // Calculate the shape for expand.
        auto input_shape =
            mlir::cast<ShapedType>(stb_op.getInput().getType()).getShape();
        SmallVector<int64_t, 4> expand_shape(input_shape.begin(),
                                             input_shape.end());
        expand_shape.insert(expand_shape.begin() + expand_axis, 1);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
    - Viewed (0)
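    Note: the excerpt above rebuilds the expand shape by copying the input shape and inserting a size-1 dimension at expand_axis, i.e. the shape an ExpandDims-style op produces. A minimal sketch of that shape arithmetic with plain vectors (the names follow the excerpt; this is an illustration, not the pass itself):

        #include <cstdint>
        #include <vector>

        // Hypothetical sketch: compute the result shape of inserting a size-1
        // dimension at `expand_axis`, mirroring how the dilated-conv rewrite
        // builds `expand_shape` from the input shape of `stb_op`.
        std::vector<int64_t> ExpandShapeAtAxis(const std::vector<int64_t>& input_shape,
                                               int64_t expand_axis) {
          std::vector<int64_t> expand_shape(input_shape.begin(), input_shape.end());
          expand_shape.insert(expand_shape.begin() + expand_axis, 1);
          return expand_shape;
        }
        // e.g. ExpandShapeAtAxis({4, 32, 32}, 3) -> {4, 32, 32, 1}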
  5. tensorflow/compiler/mlir/quantization/stablehlo/passes/defer_activation_transpose.cc

        const auto result_type = mlir::cast<TensorType>(op.getResult(0).getType());
        const SmallVector<int64_t> new_result_shape =
            Permute<int64_t>(result_type.getShape(), kNchwToNhwcPermutation);
    
        const TensorType new_result_type =
            result_type.cloneWith(new_result_shape, result_type.getElementType());
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.5K bytes
    - Viewed (0)
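    Note: the excerpt above permutes a result shape from NCHW to NHWC layout before cloning the tensor type. Below is a standalone sketch of that shape permutation using plain std::vector in place of MLIR's TensorType; the Permute name and kNchwToNhwcPermutation value are taken from the excerpt, but the body is an assumed illustration, not the TensorFlow helper.

        #include <array>
        #include <cstdint>
        #include <iostream>
        #include <vector>

        // Hypothetical sketch: permute a rank-4 shape the way the excerpt permutes
        // result_type.getShape() with kNchwToNhwcPermutation.
        static constexpr std::array<int64_t, 4> kNchwToNhwcPermutation = {0, 2, 3, 1};

        std::vector<int64_t> Permute(const std::vector<int64_t>& shape,
                                     const std::array<int64_t, 4>& permutation) {
          std::vector<int64_t> permuted(shape.size());
          for (size_t i = 0; i < shape.size(); ++i) {
            // Dimension i of the result comes from dimension permutation[i] of the input.
            permuted[i] = shape[permutation[i]];
          }
          return permuted;
        }

        int main() {
          const std::vector<int64_t> nchw = {1, 64, 28, 28};  // N, C, H, W
          const std::vector<int64_t> nhwc = Permute(nchw, kNchwToNhwcPermutation);
          for (int64_t d : nhwc) std::cout << d << ' ';  // prints: 1 28 28 64
          std::cout << '\n';
        }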
  6. tensorflow/compiler/mlir/lite/stablehlo/transforms/fold_broadcast_pass.cc

                                      DenseIntElementsAttr bcast_dims) {
      auto dimensions = llvm::to_vector(bcast_dims.getValues<int64_t>());
      const auto result_shape = result_type.getShape();
      // Index for the broadcasted matrix.
      llvm::SmallVector<int64_t, 16> current_index(result_type.getRank(), 0);
      // Computes the new operand shape using the original shape and the broadcast
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.5K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc

      if (!variant_type || variant_type.getSubtypes().size() != 1) return failure();
      TensorType tensor_type = variant_type.getSubtypes().front();
      if (!tensor_type.hasStaticShape()) return failure();
      for (auto d : tensor_type.getShape()) shape->push_back(d);
      return success();
    }
    
    LogicalResult HandleEmptyTensorListOp(
        TF::EmptyTensorListOp list,
        llvm::SmallDenseMap<Value, SizeInfo>* buffer_to_size) {
      Value buffer;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 39.2K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tf2xla/internal/passes/xla_broadcast.cc

        zero = builder.getFloatAttr(elem_type, 0);
      } else {
        return false;
      }
      if (auto ranked_type = dyn_cast<RankedTensorType>(type)) {
        llvm::ArrayRef<int64_t> type_shape = ranked_type.getShape();
        for (int64_t i : type_shape) {
          if (i < 0) return false;
        }
        shape = builder.getI64TensorAttr(type_shape);
      } else {
        return false;
      }
      return true;
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Jun 13 18:52:07 UTC 2024
    - 13.9K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/tensorflow/ir/tf_op_base.td

      } else {
        SmallVector<int64_t, 4> resultShape;
        if (!OpTrait::util::getBroadcastedShape(
                x.getType().cast<ShapedType>().getShape(),
                y.getType().cast<ShapedType>().getShape(), resultShape)) {
          mlir::emitError($_state.location,
                          "operands have no broadcastable shapes");
        }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 30.5K bytes
    - Viewed (0)
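    Note: the excerpt above calls OpTrait::util::getBroadcastedShape on the two operand shapes and emits an error when they are not broadcast-compatible. As a rough illustration of the broadcasting rule involved (trailing dimensions are compatible when they are equal or one of them is 1), here is a standalone sketch; the function name and the omission of dynamic dimensions are simplifications for the example, not MLIR's API.

        #include <algorithm>
        #include <cstdint>
        #include <vector>

        // Hypothetical sketch of NumPy-style shape broadcasting, in the spirit of
        // OpTrait::util::getBroadcastedShape. Returns false when the shapes are
        // incompatible; dynamic dimensions are not handled here.
        bool GetBroadcastedShape(const std::vector<int64_t>& x,
                                 const std::vector<int64_t>& y,
                                 std::vector<int64_t>& result) {
          const size_t rank = std::max(x.size(), y.size());
          result.assign(rank, 1);
          for (size_t i = 0; i < rank; ++i) {
            // Align shapes from the trailing dimension; missing dims act as 1.
            const int64_t dx = i < x.size() ? x[x.size() - 1 - i] : 1;
            const int64_t dy = i < y.size() ? y[y.size() - 1 - i] : 1;
            if (dx != dy && dx != 1 && dy != 1) return false;  // not broadcastable
            result[rank - 1 - i] = std::max(dx, dy);
          }
          return true;
        }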
  10. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

    GetBroadcastShapesForBatchMatmul(ShapedType input_type,
                                     ShapedType weight_type) {
      ArrayRef<int64_t> input_shape = input_type.getShape();
      ArrayRef<int64_t> weight_shape = weight_type.getShape();
    
      const int64_t num_matmul_dim = 2;
      const int64_t num_input_batch_dim = input_type.getRank() - num_matmul_dim;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 47.1K bytes
    - Viewed (0)
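    Note: the last excerpt separates each operand's shape into leading batch dimensions and the trailing two matmul dimensions before computing broadcast shapes. A small sketch of that split, assuming plain vectors in place of ShapedType/ArrayRef (the struct and function names below are illustrative, not the TensorFlow helpers):

        #include <cstdint>
        #include <vector>

        // Hypothetical sketch: split a batched-matmul operand shape into its leading
        // batch dimensions and the trailing two matmul dimensions, mirroring the
        // num_matmul_dim / num_input_batch_dim arithmetic in the excerpt.
        // Assumes the shape has rank >= 2.
        struct BatchMatmulDims {
          std::vector<int64_t> batch_dims;   // shape[0 .. rank-3]
          std::vector<int64_t> matmul_dims;  // shape[rank-2], shape[rank-1]
        };

        BatchMatmulDims SplitBatchMatmulShape(const std::vector<int64_t>& shape) {
          constexpr int64_t kNumMatmulDim = 2;
          const int64_t num_batch_dim =
              static_cast<int64_t>(shape.size()) - kNumMatmulDim;
          BatchMatmulDims dims;
          dims.batch_dims.assign(shape.begin(), shape.begin() + num_batch_dim);
          dims.matmul_dims.assign(shape.begin() + num_batch_dim, shape.end());
          return dims;
        }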