In the built-in str class, the body of every method is just ... (a literal ellipsis). For example, here is the first part of the class definition:
class str(Sequence[str]):
    @overload
    def __new__(cls: Type[_T], o: object = ...) -> _T: ...
    @overload
    def __new__(cls: Type[_T], o: bytes, encoding: str = ..., errors: str = ...) -> _T: ...
    def capitalize(self) -> str: ...
    def casefold(self) -> str: ...
    def center(self, __width: int, __fillchar: str = ...) -> str: ...
    def count(self, x: str, __start: Optional[SupportsIndex] = ..., __end: Optional[SupportsIndex] = ...) -> int: ...
    def encode(self, encoding: str = ..., errors: str = ...) -> bytes: ...
    def endswith(
        self, __suffix: Union[str, Tuple[str, ...]], __start: Optional[SupportsIndex] = ..., __end: Optional[SupportsIndex] = ...
    ) -> bool: ...
    def expandtabs(self, tabsize: int = ...) -> str: ...
    def find(self, __sub: str, __start: Optional[SupportsIndex] = ..., __end: Optional[SupportsIndex] = ...) -> int: ...
    def format(self, *args: object, **kwargs: object) -> str: ...
    def format_map(self, map: _FormatMapMapping) -> str: ...
    def index(self, __sub: str, __start: Optional[SupportsIndex] = ..., __end: Optional[SupportsIndex] = ...) -> int: ...
    def isalnum(self) -> bool: ...
    def isalpha(self) -> bool: ...
    if sys.version_info >= (3, 7):
        def isascii(self) -> bool: ...
    def isdecimal(self) -> bool: ...
    def isdigit(self) -> bool: ...
    def isidentifier(self) -> bool: ...
    def islower(self) -> bool: ...
    def isnumeric(self) -> bool: ...
    def isprintable(self) -> bool: ...
    def isspace(self) -> bool: ...
    def istitle(self) -> bool: ...
    def isupper(self) -> bool: ...
    def join(self, __iterable: Iterable[str]) -> str: ...
    def ljust(self, __width: int, __fillchar: str = ...) -> str: ...
    def lower(self) -> str: ...
    def lstrip(self, __chars: Optional[str] = ...) -> str: ...
What does this ... mean, and why is it used here rather than anything else?
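For comparison, here is a quick check using only the standard built-in str methods (nothing from the listing above is assumed beyond what Python ships with), showing that at runtime these methods return ordinary values even though the bodies shown above are only ...:

# Runtime behavior of the real built-in str methods:
# each call returns an actual value, not an ellipsis.
print("hello world".capitalize())   # Hello world
print("hello".center(11, "*"))      # ***hello***
print("banana".count("a"))          # 3
print("spam".encode("utf-8"))       # b'spam'
print(", ".join(["a", "b", "c"]))   # a, b, c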