author     shadchin <shadchin@yandex-team.com>  2024-03-04 21:16:16 +0300
committer  shadchin <shadchin@yandex-team.com>  2024-03-04 21:43:39 +0300
commit     74819c4157bd388a7d429c870ea4b343a282dafa (patch)
tree       4bff355b03dfb24b14d33581357cc8e624d170fd /contrib/python/pandas/py3
parent     f64c28a5443395e3a8f27e6f1b15a3507812d2de (diff)
download   ydb-74819c4157bd388a7d429c870ea4b343a282dafa.tar.gz
Extend support for pyi files
Currently, .pyi files listed in the PY_SRCS macro are used only by the Arcadia plugin for JetBrains products; at build time these files are simply ignored. This PR adds a step that packs the contents of these files into resources. The PY_SRCS section is convenient because it lays out .pyi files with TOP_LEVEL/NAMESPACE taken into account, which is required for mypy to work correctly. 3924b0556bc99947e6893cd79e5ce62ec72a18a9
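For illustration only, a minimal ya.make sketch of the kind of target this change affects. It assumes the usual Arcadia scaffolding (PY3_LIBRARY/PY_SRCS/END and the TOP_LEVEL keyword mentioned above); the concrete file list is hypothetical:

    PY3_LIBRARY()

    PY_SRCS(
        TOP_LEVEL
        # Stubs listed here were previously ignored at build time;
        # with this change their contents are also packed into resources.
        pandas/_libs/algos.pyi
        pandas/_libs/lib.pyi
    )

    END()

Because PY_SRCS honors TOP_LEVEL/NAMESPACE, the .pyi stubs end up at the same import path as the modules they annotate, which is what mypy needs in order to resolve them.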
Diffstat (limited to 'contrib/python/pandas/py3')
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/algos.pyi | 420
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/arrays.pyi | 34
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/groupby.pyi | 191
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/hashing.pyi | 9
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/hashtable.pyi | 251
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/index.pyi | 105
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/indexing.pyi | 17
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/internals.pyi | 102
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/interval.pyi | 174
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/join.pyi | 78
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/json.pyi | 23
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/lib.pyi | 250
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/missing.pyi | 17
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/ops.pyi | 50
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/ops_dispatch.pyi | 5
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/parsers.pyi | 75
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/properties.pyi | 27
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/reduction.pyi | 6
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/reshape.pyi | 16
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/sparse.pyi | 49
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/testing.pyi | 12
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslib.pyi | 32
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/ccalendar.pyi | 12
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/conversion.pyi | 14
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/dtypes.pyi | 81
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/fields.pyi | 62
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/nattype.pyi | 132
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/np_datetime.pyi | 21
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/offsets.pyi | 279
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/parsing.pyi | 38
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/period.pyi | 127
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/strptime.pyi | 13
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/timedeltas.pyi | 163
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/timestamps.pyi | 228
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/timezones.pyi | 21
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/tzconversion.pyi | 21
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/tslibs/vectorized.pyi | 43
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/window/aggregations.pyi | 127
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/window/indexers.pyi | 12
-rw-r--r--  contrib/python/pandas/py3/pandas/_libs/writers.pyi | 20
-rw-r--r--  contrib/python/pandas/py3/pandas/io/sas/_byteswap.pyi | 5
-rw-r--r--  contrib/python/pandas/py3/pandas/io/sas/_sas.pyi | 7
42 files changed, 3369 insertions, 0 deletions
diff --git a/contrib/python/pandas/py3/pandas/_libs/algos.pyi b/contrib/python/pandas/py3/pandas/_libs/algos.pyi
new file mode 100644
index 00000000000..20a805533e8
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/algos.pyi
@@ -0,0 +1,420 @@
+from typing import Any
+
+import numpy as np
+
+from pandas._typing import npt
+
+class Infinity:
+ """
+ Provide a positive Infinity comparison method for ranking.
+ """
+
+ def __eq__(self, other) -> bool: ...
+ def __ne__(self, other) -> bool: ...
+ def __lt__(self, other) -> bool: ...
+ def __le__(self, other) -> bool: ...
+ def __gt__(self, other) -> bool: ...
+ def __ge__(self, other) -> bool: ...
+
+class NegInfinity:
+ """
+ Provide a negative Infinity comparison method for ranking.
+ """
+
+ def __eq__(self, other) -> bool: ...
+ def __ne__(self, other) -> bool: ...
+ def __lt__(self, other) -> bool: ...
+ def __le__(self, other) -> bool: ...
+ def __gt__(self, other) -> bool: ...
+ def __ge__(self, other) -> bool: ...
+
+def unique_deltas(
+ arr: np.ndarray, # const int64_t[:]
+) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1]
+def is_lexsorted(list_of_arrays: list[npt.NDArray[np.int64]]) -> bool: ...
+def groupsort_indexer(
+ index: np.ndarray, # const int64_t[:]
+ ngroups: int,
+) -> tuple[
+ np.ndarray, # ndarray[int64_t, ndim=1]
+ np.ndarray, # ndarray[int64_t, ndim=1]
+]: ...
+def kth_smallest(
+ arr: np.ndarray, # numeric[:]
+ k: int,
+) -> Any: ... # numeric
+
+# ----------------------------------------------------------------------
+# Pairwise correlation/covariance
+
+def nancorr(
+ mat: npt.NDArray[np.float64], # const float64_t[:, :]
+ cov: bool = ...,
+ minp: int | None = ...,
+) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2]
+def nancorr_spearman(
+ mat: npt.NDArray[np.float64], # ndarray[float64_t, ndim=2]
+ minp: int = ...,
+) -> npt.NDArray[np.float64]: ... # ndarray[float64_t, ndim=2]
+
+# ----------------------------------------------------------------------
+
+def validate_limit(nobs: int | None, limit=...) -> int: ...
+def pad(
+ old: np.ndarray, # ndarray[numeric_object_t]
+ new: np.ndarray, # ndarray[numeric_object_t]
+ limit=...,
+) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1]
+def pad_inplace(
+ values: np.ndarray, # numeric_object_t[:]
+ mask: np.ndarray, # uint8_t[:]
+ limit=...,
+) -> None: ...
+def pad_2d_inplace(
+ values: np.ndarray, # numeric_object_t[:, :]
+ mask: np.ndarray, # const uint8_t[:, :]
+ limit=...,
+) -> None: ...
+def backfill(
+ old: np.ndarray, # ndarray[numeric_object_t]
+ new: np.ndarray, # ndarray[numeric_object_t]
+ limit=...,
+) -> npt.NDArray[np.intp]: ... # np.ndarray[np.intp, ndim=1]
+def backfill_inplace(
+ values: np.ndarray, # numeric_object_t[:]
+ mask: np.ndarray, # uint8_t[:]
+ limit=...,
+) -> None: ...
+def backfill_2d_inplace(
+ values: np.ndarray, # numeric_object_t[:, :]
+ mask: np.ndarray, # const uint8_t[:, :]
+ limit=...,
+) -> None: ...
+def is_monotonic(
+ arr: np.ndarray, # ndarray[numeric_object_t, ndim=1]
+ timelike: bool,
+) -> tuple[bool, bool, bool]: ...
+
+# ----------------------------------------------------------------------
+# rank_1d, rank_2d
+# ----------------------------------------------------------------------
+
+def rank_1d(
+ values: np.ndarray, # ndarray[numeric_object_t, ndim=1]
+ labels: np.ndarray | None = ..., # const int64_t[:]=None
+ is_datetimelike: bool = ...,
+ ties_method=...,
+ ascending: bool = ...,
+ pct: bool = ...,
+ na_option=...,
+ mask: npt.NDArray[np.bool_] | None = ...,
+) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1]
+def rank_2d(
+ in_arr: np.ndarray, # ndarray[numeric_object_t, ndim=2]
+ axis: int = ...,
+ is_datetimelike: bool = ...,
+ ties_method=...,
+ ascending: bool = ...,
+ na_option=...,
+ pct: bool = ...,
+) -> np.ndarray: ... # np.ndarray[float64_t, ndim=1]
+def diff_2d(
+ arr: np.ndarray, # ndarray[diff_t, ndim=2]
+ out: np.ndarray, # ndarray[out_t, ndim=2]
+ periods: int,
+ axis: int,
+ datetimelike: bool = ...,
+) -> None: ...
+def ensure_platform_int(arr: object) -> npt.NDArray[np.intp]: ...
+def ensure_object(arr: object) -> npt.NDArray[np.object_]: ...
+def ensure_float64(arr: object) -> npt.NDArray[np.float64]: ...
+def ensure_int8(arr: object) -> npt.NDArray[np.int8]: ...
+def ensure_int16(arr: object) -> npt.NDArray[np.int16]: ...
+def ensure_int32(arr: object) -> npt.NDArray[np.int32]: ...
+def ensure_int64(arr: object) -> npt.NDArray[np.int64]: ...
+def ensure_uint64(arr: object) -> npt.NDArray[np.uint64]: ...
+def take_1d_int8_int8(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int8_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int8_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int8_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_int16(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int16_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int32_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int32_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int32_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int64_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_int64_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_float32_float32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_float32_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_float64_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_object_object(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_bool_bool(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_1d_bool_object(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_int8(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int8_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_int16(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int16_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int32_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int32_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int32_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int64_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_int64_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_float32_float32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_float32_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_float64_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_object_object(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_bool_bool(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis0_bool_object(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_int8(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int8_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_int16(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int16_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int32_int32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int32_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int32_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int64_int64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_int64_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_float32_float32(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_float32_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_float64_float64(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_object_object(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_bool_bool(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_axis1_bool_object(
+ values: np.ndarray, indexer: npt.NDArray[np.intp], out: np.ndarray, fill_value=...
+) -> None: ...
+def take_2d_multi_int8_int8(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int8_int32(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int8_int64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int8_float64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_int16(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_int32(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_int64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int16_float64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int32_int32(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int32_int64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int32_float64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int64_float64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_float32_float32(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_float32_float64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_float64_float64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_object_object(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_bool_bool(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_bool_object(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
+def take_2d_multi_int64_int64(
+ values: np.ndarray,
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+ out: np.ndarray,
+ fill_value=...,
+) -> None: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/arrays.pyi b/contrib/python/pandas/py3/pandas/_libs/arrays.pyi
new file mode 100644
index 00000000000..c9350ed9b8a
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/arrays.pyi
@@ -0,0 +1,34 @@
+from typing import Sequence
+
+import numpy as np
+
+from pandas._typing import (
+ DtypeObj,
+ Shape,
+)
+
+class NDArrayBacked:
+ _dtype: DtypeObj
+ _ndarray: np.ndarray
+ def __init__(self, values: np.ndarray, dtype: DtypeObj) -> None: ...
+ @classmethod
+ def _simple_new(cls, values: np.ndarray, dtype: DtypeObj): ...
+ def _from_backing_data(self, values: np.ndarray): ...
+ def __setstate__(self, state): ...
+ def __len__(self) -> int: ...
+ @property
+ def shape(self) -> Shape: ...
+ @property
+ def ndim(self) -> int: ...
+ @property
+ def size(self) -> int: ...
+ @property
+ def nbytes(self) -> int: ...
+ def copy(self): ...
+ def delete(self, loc, axis=...): ...
+ def swapaxes(self, axis1, axis2): ...
+ def repeat(self, repeats: int | Sequence[int], axis: int | None = ...): ...
+ def reshape(self, *args, **kwargs): ...
+ def ravel(self, order=...): ...
+ @property
+ def T(self): ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/groupby.pyi b/contrib/python/pandas/py3/pandas/_libs/groupby.pyi
new file mode 100644
index 00000000000..e3ca9c44d56
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/groupby.pyi
@@ -0,0 +1,191 @@
+from typing import Literal
+
+import numpy as np
+
+from pandas._typing import npt
+
+def group_median_float64(
+ out: np.ndarray, # ndarray[float64_t, ndim=2]
+ counts: npt.NDArray[np.int64],
+ values: np.ndarray, # ndarray[float64_t, ndim=2]
+ labels: npt.NDArray[np.int64],
+ min_count: int = ..., # Py_ssize_t
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_cumprod(
+ out: np.ndarray, # float64_t[:, ::1]
+ values: np.ndarray, # const float64_t[:, :]
+ labels: np.ndarray, # const int64_t[:]
+ ngroups: int,
+ is_datetimelike: bool,
+ skipna: bool = ...,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_cumsum(
+ out: np.ndarray, # int64float_t[:, ::1]
+ values: np.ndarray, # ndarray[int64float_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ ngroups: int,
+ is_datetimelike: bool,
+ skipna: bool = ...,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_shift_indexer(
+ out: np.ndarray, # int64_t[::1]
+ labels: np.ndarray, # const int64_t[:]
+ ngroups: int,
+ periods: int,
+) -> None: ...
+def group_fillna_indexer(
+ out: np.ndarray, # ndarray[intp_t]
+ labels: np.ndarray, # ndarray[int64_t]
+ sorted_labels: npt.NDArray[np.intp],
+ mask: npt.NDArray[np.uint8],
+ direction: Literal["ffill", "bfill"],
+ limit: int, # int64_t
+ dropna: bool,
+) -> None: ...
+def group_any_all(
+ out: np.ndarray, # uint8_t[::1]
+ values: np.ndarray, # const uint8_t[::1]
+ labels: np.ndarray, # const int64_t[:]
+ mask: np.ndarray, # const uint8_t[::1]
+ val_test: Literal["any", "all"],
+ skipna: bool,
+ nullable: bool,
+) -> None: ...
+def group_sum(
+ out: np.ndarray, # complexfloatingintuint_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[complexfloatingintuint_t, ndim=2]
+ labels: np.ndarray, # const intp_t[:]
+ mask: np.ndarray | None,
+ result_mask: np.ndarray | None = ...,
+ min_count: int = ...,
+ is_datetimelike: bool = ...,
+) -> None: ...
+def group_prod(
+ out: np.ndarray, # int64float_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[int64float_t, ndim=2]
+ labels: np.ndarray, # const intp_t[:]
+ mask: np.ndarray | None,
+ result_mask: np.ndarray | None = ...,
+ min_count: int = ...,
+) -> None: ...
+def group_var(
+ out: np.ndarray, # floating[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[floating, ndim=2]
+ labels: np.ndarray, # const intp_t[:]
+ min_count: int = ..., # Py_ssize_t
+ ddof: int = ..., # int64_t
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+ is_datetimelike: bool = ...,
+) -> None: ...
+def group_mean(
+ out: np.ndarray, # floating[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[floating, ndim=2]
+ labels: np.ndarray, # const intp_t[:]
+ min_count: int = ..., # Py_ssize_t
+ is_datetimelike: bool = ..., # bint
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_ohlc(
+ out: np.ndarray, # floatingintuint_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[floatingintuint_t, ndim=2]
+ labels: np.ndarray, # const intp_t[:]
+ min_count: int = ...,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_quantile(
+ out: npt.NDArray[np.float64],
+ values: np.ndarray, # ndarray[numeric, ndim=1]
+ labels: npt.NDArray[np.intp],
+ mask: npt.NDArray[np.uint8],
+ sort_indexer: npt.NDArray[np.intp], # const
+ qs: npt.NDArray[np.float64], # const
+ interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_last(
+ out: np.ndarray, # rank_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[rank_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ mask: npt.NDArray[np.bool_] | None,
+ result_mask: npt.NDArray[np.bool_] | None = ...,
+ min_count: int = ..., # Py_ssize_t
+ is_datetimelike: bool = ...,
+) -> None: ...
+def group_nth(
+ out: np.ndarray, # rank_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[rank_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ mask: npt.NDArray[np.bool_] | None,
+ result_mask: npt.NDArray[np.bool_] | None = ...,
+ min_count: int = ..., # int64_t
+ rank: int = ..., # int64_t
+ is_datetimelike: bool = ...,
+) -> None: ...
+def group_rank(
+ out: np.ndarray, # float64_t[:, ::1]
+ values: np.ndarray, # ndarray[rank_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ ngroups: int,
+ is_datetimelike: bool,
+ ties_method: Literal["average", "min", "max", "first", "dense"] = ...,
+ ascending: bool = ...,
+ pct: bool = ...,
+ na_option: Literal["keep", "top", "bottom"] = ...,
+ mask: npt.NDArray[np.bool_] | None = ...,
+) -> None: ...
+def group_max(
+ out: np.ndarray, # groupby_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[groupby_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ min_count: int = ...,
+ is_datetimelike: bool = ...,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_min(
+ out: np.ndarray, # groupby_t[:, ::1]
+ counts: np.ndarray, # int64_t[::1]
+ values: np.ndarray, # ndarray[groupby_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ min_count: int = ...,
+ is_datetimelike: bool = ...,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+) -> None: ...
+def group_cummin(
+ out: np.ndarray, # groupby_t[:, ::1]
+ values: np.ndarray, # ndarray[groupby_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ ngroups: int,
+ is_datetimelike: bool,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+ skipna: bool = ...,
+) -> None: ...
+def group_cummax(
+ out: np.ndarray, # groupby_t[:, ::1]
+ values: np.ndarray, # ndarray[groupby_t, ndim=2]
+ labels: np.ndarray, # const int64_t[:]
+ ngroups: int,
+ is_datetimelike: bool,
+ mask: np.ndarray | None = ...,
+ result_mask: np.ndarray | None = ...,
+ skipna: bool = ...,
+) -> None: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/hashing.pyi b/contrib/python/pandas/py3/pandas/_libs/hashing.pyi
new file mode 100644
index 00000000000..8361026e4a8
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/hashing.pyi
@@ -0,0 +1,9 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def hash_object_array(
+ arr: npt.NDArray[np.object_],
+ key: str,
+ encoding: str = ...,
+) -> npt.NDArray[np.uint64]: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/hashtable.pyi b/contrib/python/pandas/py3/pandas/_libs/hashtable.pyi
new file mode 100644
index 00000000000..2bc6d74fe6a
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/hashtable.pyi
@@ -0,0 +1,251 @@
+from typing import (
+ Any,
+ Hashable,
+ Literal,
+)
+
+import numpy as np
+
+from pandas._typing import npt
+
+def unique_label_indices(
+ labels: np.ndarray, # const int64_t[:]
+) -> np.ndarray: ...
+
+class Factorizer:
+ count: int
+ uniques: Any
+ def __init__(self, size_hint: int) -> None: ...
+ def get_count(self) -> int: ...
+ def factorize(
+ self,
+ values: np.ndarray,
+ sort: bool = ...,
+ na_sentinel=...,
+ na_value=...,
+ mask=...,
+ ) -> npt.NDArray[np.intp]: ...
+
+class ObjectFactorizer(Factorizer):
+ table: PyObjectHashTable
+ uniques: ObjectVector
+
+class Int64Factorizer(Factorizer):
+ table: Int64HashTable
+ uniques: Int64Vector
+
+class UInt64Factorizer(Factorizer):
+ table: UInt64HashTable
+ uniques: UInt64Vector
+
+class Int32Factorizer(Factorizer):
+ table: Int32HashTable
+ uniques: Int32Vector
+
+class UInt32Factorizer(Factorizer):
+ table: UInt32HashTable
+ uniques: UInt32Vector
+
+class Int16Factorizer(Factorizer):
+ table: Int16HashTable
+ uniques: Int16Vector
+
+class UInt16Factorizer(Factorizer):
+ table: UInt16HashTable
+ uniques: UInt16Vector
+
+class Int8Factorizer(Factorizer):
+ table: Int8HashTable
+ uniques: Int8Vector
+
+class UInt8Factorizer(Factorizer):
+ table: UInt8HashTable
+ uniques: UInt8Vector
+
+class Float64Factorizer(Factorizer):
+ table: Float64HashTable
+ uniques: Float64Vector
+
+class Float32Factorizer(Factorizer):
+ table: Float32HashTable
+ uniques: Float32Vector
+
+class Complex64Factorizer(Factorizer):
+ table: Complex64HashTable
+ uniques: Complex64Vector
+
+class Complex128Factorizer(Factorizer):
+ table: Complex128HashTable
+ uniques: Complex128Vector
+
+class Int64Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.int64]: ...
+
+class Int32Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.int32]: ...
+
+class Int16Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.int16]: ...
+
+class Int8Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.int8]: ...
+
+class UInt64Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.uint64]: ...
+
+class UInt32Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.uint32]: ...
+
+class UInt16Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.uint16]: ...
+
+class UInt8Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.uint8]: ...
+
+class Float64Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.float64]: ...
+
+class Float32Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.float32]: ...
+
+class Complex128Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.complex128]: ...
+
+class Complex64Vector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.complex64]: ...
+
+class StringVector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.object_]: ...
+
+class ObjectVector:
+ def __init__(self, *args) -> None: ...
+ def __len__(self) -> int: ...
+ def to_array(self) -> npt.NDArray[np.object_]: ...
+
+class HashTable:
+ # NB: The base HashTable class does _not_ actually have these methods;
+ # we are putting them here for the sake of mypy to avoid
+ # reproducing them in each subclass below.
+ def __init__(self, size_hint: int = ..., uses_mask: bool = ...) -> None: ...
+ def __len__(self) -> int: ...
+ def __contains__(self, key: Hashable) -> bool: ...
+ def sizeof(self, deep: bool = ...) -> int: ...
+ def get_state(self) -> dict[str, int]: ...
+ # TODO: `item` type is subclass-specific
+ def get_item(self, item): ... # TODO: return type?
+ def set_item(self, item, val) -> None: ...
+ def get_na(self): ... # TODO: return type?
+ def set_na(self, val) -> None: ...
+ def map_locations(
+ self,
+ values: np.ndarray, # np.ndarray[subclass-specific]
+ mask: npt.NDArray[np.bool_] | None = ...,
+ ) -> None: ...
+ def lookup(
+ self,
+ values: np.ndarray, # np.ndarray[subclass-specific]
+ mask: npt.NDArray[np.bool_] | None = ...,
+ ) -> npt.NDArray[np.intp]: ...
+ def get_labels(
+ self,
+ values: np.ndarray, # np.ndarray[subclass-specific]
+ uniques, # SubclassTypeVector
+ count_prior: int = ...,
+ na_sentinel: int = ...,
+ na_value: object = ...,
+ mask=...,
+ ) -> npt.NDArray[np.intp]: ...
+ def unique(
+ self,
+ values: np.ndarray, # np.ndarray[subclass-specific]
+ return_inverse: bool = ...,
+ ) -> (
+ tuple[
+ np.ndarray, # np.ndarray[subclass-specific]
+ npt.NDArray[np.intp],
+ ]
+ | np.ndarray
+ ): ... # np.ndarray[subclass-specific]
+ def factorize(
+ self,
+ values: np.ndarray, # np.ndarray[subclass-specific]
+ na_sentinel: int = ...,
+ na_value: object = ...,
+ mask=...,
+ ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific]
+
+class Complex128HashTable(HashTable): ...
+class Complex64HashTable(HashTable): ...
+class Float64HashTable(HashTable): ...
+class Float32HashTable(HashTable): ...
+
+class Int64HashTable(HashTable):
+ # Only Int64HashTable has get_labels_groupby, map_keys_to_values
+ def get_labels_groupby(
+ self,
+ values: npt.NDArray[np.int64], # const int64_t[:]
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.int64]]: ...
+ def map_keys_to_values(
+ self,
+ keys: npt.NDArray[np.int64],
+ values: npt.NDArray[np.int64], # const int64_t[:]
+ ) -> None: ...
+
+class Int32HashTable(HashTable): ...
+class Int16HashTable(HashTable): ...
+class Int8HashTable(HashTable): ...
+class UInt64HashTable(HashTable): ...
+class UInt32HashTable(HashTable): ...
+class UInt16HashTable(HashTable): ...
+class UInt8HashTable(HashTable): ...
+class StringHashTable(HashTable): ...
+class PyObjectHashTable(HashTable): ...
+class IntpHashTable(HashTable): ...
+
+def duplicated(
+ values: np.ndarray,
+ keep: Literal["last", "first", False] = ...,
+ mask: npt.NDArray[np.bool_] | None = ...,
+) -> npt.NDArray[np.bool_]: ...
+def mode(
+ values: np.ndarray, dropna: bool, mask: npt.NDArray[np.bool_] | None = ...
+) -> np.ndarray: ...
+def value_count(
+ values: np.ndarray,
+ dropna: bool,
+ mask: npt.NDArray[np.bool_] | None = ...,
+) -> tuple[np.ndarray, npt.NDArray[np.int64]]: ... # np.ndarray[same-as-values]
+
+# arr and values should have same dtype
+def ismember(
+ arr: np.ndarray,
+ values: np.ndarray,
+) -> npt.NDArray[np.bool_]: ...
+def object_hash(obj) -> int: ...
+def objects_are_equal(a, b) -> bool: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/index.pyi b/contrib/python/pandas/py3/pandas/_libs/index.pyi
new file mode 100644
index 00000000000..e08faaaa031
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/index.pyi
@@ -0,0 +1,105 @@
+import numpy as np
+
+from pandas._typing import npt
+
+from pandas import MultiIndex
+from pandas.core.arrays import ExtensionArray
+
+class IndexEngine:
+ over_size_threshold: bool
+ def __init__(self, values: np.ndarray) -> None: ...
+ def __contains__(self, val: object) -> bool: ...
+
+ # -> int | slice | np.ndarray[bool]
+ def get_loc(self, val: object) -> int | slice | np.ndarray: ...
+ def sizeof(self, deep: bool = ...) -> int: ...
+ def __sizeof__(self) -> int: ...
+ @property
+ def is_unique(self) -> bool: ...
+ @property
+ def is_monotonic_increasing(self) -> bool: ...
+ @property
+ def is_monotonic_decreasing(self) -> bool: ...
+ @property
+ def is_mapping_populated(self) -> bool: ...
+ def clear_mapping(self): ...
+ def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ...
+ def get_indexer_non_unique(
+ self,
+ targets: np.ndarray,
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+
+class MaskedIndexEngine(IndexEngine):
+ def __init__(self, values: object) -> None: ...
+ def get_indexer_non_unique(
+ self, targets: object
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+
+class Float64Engine(IndexEngine): ...
+class Float32Engine(IndexEngine): ...
+class Complex128Engine(IndexEngine): ...
+class Complex64Engine(IndexEngine): ...
+class Int64Engine(IndexEngine): ...
+class Int32Engine(IndexEngine): ...
+class Int16Engine(IndexEngine): ...
+class Int8Engine(IndexEngine): ...
+class UInt64Engine(IndexEngine): ...
+class UInt32Engine(IndexEngine): ...
+class UInt16Engine(IndexEngine): ...
+class UInt8Engine(IndexEngine): ...
+class ObjectEngine(IndexEngine): ...
+class DatetimeEngine(Int64Engine): ...
+class TimedeltaEngine(DatetimeEngine): ...
+class PeriodEngine(Int64Engine): ...
+class BoolEngine(UInt8Engine): ...
+class MaskedFloat64Engine(MaskedIndexEngine): ...
+class MaskedFloat32Engine(MaskedIndexEngine): ...
+class MaskedComplex128Engine(MaskedIndexEngine): ...
+class MaskedComplex64Engine(MaskedIndexEngine): ...
+class MaskedInt64Engine(MaskedIndexEngine): ...
+class MaskedInt32Engine(MaskedIndexEngine): ...
+class MaskedInt16Engine(MaskedIndexEngine): ...
+class MaskedInt8Engine(MaskedIndexEngine): ...
+class MaskedUInt64Engine(MaskedIndexEngine): ...
+class MaskedUInt32Engine(MaskedIndexEngine): ...
+class MaskedUInt16Engine(MaskedIndexEngine): ...
+class MaskedUInt8Engine(MaskedIndexEngine): ...
+class MaskedBoolEngine(MaskedUInt8Engine): ...
+
+class BaseMultiIndexCodesEngine:
+ levels: list[np.ndarray]
+ offsets: np.ndarray # ndarray[uint64_t, ndim=1]
+
+ def __init__(
+ self,
+ levels: list[np.ndarray], # all entries hashable
+ labels: list[np.ndarray], # all entries integer-dtyped
+ offsets: np.ndarray, # np.ndarray[np.uint64, ndim=1]
+ ) -> None: ...
+ def get_indexer(self, target: npt.NDArray[np.object_]) -> npt.NDArray[np.intp]: ...
+ def _extract_level_codes(self, target: MultiIndex) -> np.ndarray: ...
+ def get_indexer_with_fill(
+ self,
+ target: np.ndarray, # np.ndarray[object] of tuples
+ values: np.ndarray, # np.ndarray[object] of tuples
+ method: str,
+ limit: int | None,
+ ) -> npt.NDArray[np.intp]: ...
+
+class ExtensionEngine:
+ def __init__(self, values: ExtensionArray) -> None: ...
+ def __contains__(self, val: object) -> bool: ...
+ def get_loc(self, val: object) -> int | slice | np.ndarray: ...
+ def get_indexer(self, values: np.ndarray) -> npt.NDArray[np.intp]: ...
+ def get_indexer_non_unique(
+ self,
+ targets: np.ndarray,
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+ @property
+ def is_unique(self) -> bool: ...
+ @property
+ def is_monotonic_increasing(self) -> bool: ...
+ @property
+ def is_monotonic_decreasing(self) -> bool: ...
+ def sizeof(self, deep: bool = ...) -> int: ...
+ def clear_mapping(self): ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/indexing.pyi b/contrib/python/pandas/py3/pandas/_libs/indexing.pyi
new file mode 100644
index 00000000000..3ae5c5044a2
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/indexing.pyi
@@ -0,0 +1,17 @@
+from typing import (
+ Generic,
+ TypeVar,
+)
+
+from pandas.core.indexing import IndexingMixin
+
+_IndexingMixinT = TypeVar("_IndexingMixinT", bound=IndexingMixin)
+
+class NDFrameIndexerBase(Generic[_IndexingMixinT]):
+ name: str
+ # in practice obj is either a DataFrame or a Series
+ obj: _IndexingMixinT
+
+ def __init__(self, name: str, obj: _IndexingMixinT) -> None: ...
+ @property
+ def ndim(self) -> int: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/internals.pyi b/contrib/python/pandas/py3/pandas/_libs/internals.pyi
new file mode 100644
index 00000000000..cee96801290
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/internals.pyi
@@ -0,0 +1,102 @@
+from typing import (
+ Iterator,
+ Sequence,
+ final,
+ overload,
+)
+import weakref
+
+import numpy as np
+
+from pandas._typing import (
+ ArrayLike,
+ T,
+ npt,
+)
+
+from pandas import Index
+from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
+from pandas.core.internals.blocks import Block as B
+
+def slice_len(slc: slice, objlen: int = ...) -> int: ...
+def get_blkno_indexers(
+ blknos: np.ndarray, # int64_t[:]
+ group: bool = ...,
+) -> list[tuple[int, slice | np.ndarray]]: ...
+def get_blkno_placements(
+ blknos: np.ndarray,
+ group: bool = ...,
+) -> Iterator[tuple[int, BlockPlacement]]: ...
+def update_blklocs_and_blknos(
+ blklocs: npt.NDArray[np.intp],
+ blknos: npt.NDArray[np.intp],
+ loc: int,
+ nblocks: int,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+@final
+class BlockPlacement:
+ def __init__(self, val: int | slice | np.ndarray) -> None: ...
+ @property
+ def indexer(self) -> np.ndarray | slice: ...
+ @property
+ def as_array(self) -> np.ndarray: ...
+ @property
+ def as_slice(self) -> slice: ...
+ @property
+ def is_slice_like(self) -> bool: ...
+ @overload
+ def __getitem__(
+ self, loc: slice | Sequence[int] | npt.NDArray[np.intp]
+ ) -> BlockPlacement: ...
+ @overload
+ def __getitem__(self, loc: int) -> int: ...
+ def __iter__(self) -> Iterator[int]: ...
+ def __len__(self) -> int: ...
+ def delete(self, loc) -> BlockPlacement: ...
+ def append(self, others: list[BlockPlacement]) -> BlockPlacement: ...
+ def tile_for_unstack(self, factor: int) -> npt.NDArray[np.intp]: ...
+
+class SharedBlock:
+ _mgr_locs: BlockPlacement
+ ndim: int
+ values: ArrayLike
+ refs: BlockValuesRefs
+ def __init__(
+ self,
+ values: ArrayLike,
+ placement: BlockPlacement,
+ ndim: int,
+ refs: BlockValuesRefs | None = ...,
+ ) -> None: ...
+
+class NumpyBlock(SharedBlock):
+ values: np.ndarray
+ @final
+ def getitem_block_index(self: T, slicer: slice) -> T: ...
+
+class NDArrayBackedBlock(SharedBlock):
+ values: NDArrayBackedExtensionArray
+ @final
+ def getitem_block_index(self: T, slicer: slice) -> T: ...
+
+class Block(SharedBlock): ...
+
+class BlockManager:
+ blocks: tuple[B, ...]
+ axes: list[Index]
+ _known_consolidated: bool
+ _is_consolidated: bool
+ _blknos: np.ndarray
+ _blklocs: np.ndarray
+ def __init__(
+ self, blocks: tuple[B, ...], axes: list[Index], verify_integrity=...
+ ) -> None: ...
+ def get_slice(self: T, slobj: slice, axis: int = ...) -> T: ...
+ def _rebuild_blknos_and_blklocs(self) -> None: ...
+
+class BlockValuesRefs:
+ referenced_blocks: list[weakref.ref]
+ def __init__(self, blk: SharedBlock | None = ...) -> None: ...
+ def add_reference(self, blk: SharedBlock) -> None: ...
+ def add_index_reference(self, index: object) -> None: ...
+ def has_reference(self) -> bool: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/interval.pyi b/contrib/python/pandas/py3/pandas/_libs/interval.pyi
new file mode 100644
index 00000000000..4c36246e04d
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/interval.pyi
@@ -0,0 +1,174 @@
+from typing import (
+ Any,
+ Generic,
+ TypeVar,
+ overload,
+)
+
+import numpy as np
+import numpy.typing as npt
+
+from pandas._typing import (
+ IntervalClosedType,
+ Timedelta,
+ Timestamp,
+)
+
+VALID_CLOSED: frozenset[str]
+
+_OrderableScalarT = TypeVar("_OrderableScalarT", int, float)
+_OrderableTimesT = TypeVar("_OrderableTimesT", Timestamp, Timedelta)
+_OrderableT = TypeVar("_OrderableT", int, float, Timestamp, Timedelta)
+
+class _LengthDescriptor:
+ @overload
+ def __get__(
+ self, instance: Interval[_OrderableScalarT], owner: Any
+ ) -> _OrderableScalarT: ...
+ @overload
+ def __get__(
+ self, instance: Interval[_OrderableTimesT], owner: Any
+ ) -> Timedelta: ...
+
+class _MidDescriptor:
+ @overload
+ def __get__(self, instance: Interval[_OrderableScalarT], owner: Any) -> float: ...
+ @overload
+ def __get__(
+ self, instance: Interval[_OrderableTimesT], owner: Any
+ ) -> _OrderableTimesT: ...
+
+class IntervalMixin:
+ @property
+ def closed_left(self) -> bool: ...
+ @property
+ def closed_right(self) -> bool: ...
+ @property
+ def open_left(self) -> bool: ...
+ @property
+ def open_right(self) -> bool: ...
+ @property
+ def is_empty(self) -> bool: ...
+ def _check_closed_matches(self, other: IntervalMixin, name: str = ...) -> None: ...
+
+class Interval(IntervalMixin, Generic[_OrderableT]):
+ @property
+ def left(self: Interval[_OrderableT]) -> _OrderableT: ...
+ @property
+ def right(self: Interval[_OrderableT]) -> _OrderableT: ...
+ @property
+ def closed(self) -> IntervalClosedType: ...
+ mid: _MidDescriptor
+ length: _LengthDescriptor
+ def __init__(
+ self,
+ left: _OrderableT,
+ right: _OrderableT,
+ closed: IntervalClosedType = ...,
+ ) -> None: ...
+ def __hash__(self) -> int: ...
+ @overload
+ def __contains__(
+ self: Interval[Timedelta], key: Timedelta | Interval[Timedelta]
+ ) -> bool: ...
+ @overload
+ def __contains__(
+ self: Interval[Timestamp], key: Timestamp | Interval[Timestamp]
+ ) -> bool: ...
+ @overload
+ def __contains__(
+ self: Interval[_OrderableScalarT],
+ key: _OrderableScalarT | Interval[_OrderableScalarT],
+ ) -> bool: ...
+ @overload
+ def __add__(
+ self: Interval[_OrderableTimesT], y: Timedelta
+ ) -> Interval[_OrderableTimesT]: ...
+ @overload
+ def __add__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __add__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __radd__(
+ self: Interval[_OrderableTimesT], y: Timedelta
+ ) -> Interval[_OrderableTimesT]: ...
+ @overload
+ def __radd__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __radd__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __sub__(
+ self: Interval[_OrderableTimesT], y: Timedelta
+ ) -> Interval[_OrderableTimesT]: ...
+ @overload
+ def __sub__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __sub__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __rsub__(
+ self: Interval[_OrderableTimesT], y: Timedelta
+ ) -> Interval[_OrderableTimesT]: ...
+ @overload
+ def __rsub__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __rsub__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __mul__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __mul__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __rmul__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __rmul__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __truediv__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __truediv__(self: Interval[float], y: float) -> Interval[float]: ...
+ @overload
+ def __floordiv__(
+ self: Interval[int], y: _OrderableScalarT
+ ) -> Interval[_OrderableScalarT]: ...
+ @overload
+ def __floordiv__(self: Interval[float], y: float) -> Interval[float]: ...
+ def overlaps(self: Interval[_OrderableT], other: Interval[_OrderableT]) -> bool: ...
+
+def intervals_to_interval_bounds(
+ intervals: np.ndarray, validate_closed: bool = ...
+) -> tuple[np.ndarray, np.ndarray, str]: ...
+
+class IntervalTree(IntervalMixin):
+ def __init__(
+ self,
+ left: np.ndarray,
+ right: np.ndarray,
+ closed: IntervalClosedType = ...,
+ leaf_size: int = ...,
+ ) -> None: ...
+ @property
+ def mid(self) -> np.ndarray: ...
+ @property
+ def length(self) -> np.ndarray: ...
+ def get_indexer(self, target) -> npt.NDArray[np.intp]: ...
+ def get_indexer_non_unique(
+ self, target
+ ) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+ _na_count: int
+ @property
+ def is_overlapping(self) -> bool: ...
+ @property
+ def is_monotonic_increasing(self) -> bool: ...
+ def clear_mapping(self) -> None: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/join.pyi b/contrib/python/pandas/py3/pandas/_libs/join.pyi
new file mode 100644
index 00000000000..11b65b85909
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/join.pyi
@@ -0,0 +1,78 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def inner_join(
+ left: np.ndarray, # const intp_t[:]
+ right: np.ndarray, # const intp_t[:]
+ max_groups: int,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def left_outer_join(
+ left: np.ndarray, # const intp_t[:]
+ right: np.ndarray, # const intp_t[:]
+ max_groups: int,
+ sort: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def full_outer_join(
+ left: np.ndarray, # const intp_t[:]
+ right: np.ndarray, # const intp_t[:]
+ max_groups: int,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def ffill_indexer(
+ indexer: np.ndarray, # const intp_t[:]
+) -> npt.NDArray[np.intp]: ...
+def left_join_indexer_unique(
+ left: np.ndarray, # ndarray[join_t]
+ right: np.ndarray, # ndarray[join_t]
+) -> npt.NDArray[np.intp]: ...
+def left_join_indexer(
+ left: np.ndarray, # ndarray[join_t]
+ right: np.ndarray, # ndarray[join_t]
+) -> tuple[
+ np.ndarray, # np.ndarray[join_t]
+ npt.NDArray[np.intp],
+ npt.NDArray[np.intp],
+]: ...
+def inner_join_indexer(
+ left: np.ndarray, # ndarray[join_t]
+ right: np.ndarray, # ndarray[join_t]
+) -> tuple[
+ np.ndarray, # np.ndarray[join_t]
+ npt.NDArray[np.intp],
+ npt.NDArray[np.intp],
+]: ...
+def outer_join_indexer(
+ left: np.ndarray, # ndarray[join_t]
+ right: np.ndarray, # ndarray[join_t]
+) -> tuple[
+ np.ndarray, # np.ndarray[join_t]
+ npt.NDArray[np.intp],
+ npt.NDArray[np.intp],
+]: ...
+def asof_join_backward_on_X_by_Y(
+ left_values: np.ndarray, # asof_t[:]
+ right_values: np.ndarray, # asof_t[:]
+ left_by_values: np.ndarray, # by_t[:]
+ right_by_values: np.ndarray, # by_t[:]
+ allow_exact_matches: bool = ...,
+ tolerance: np.number | float | None = ...,
+ use_hashtable: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def asof_join_forward_on_X_by_Y(
+ left_values: np.ndarray, # asof_t[:]
+ right_values: np.ndarray, # asof_t[:]
+ left_by_values: np.ndarray, # by_t[:]
+ right_by_values: np.ndarray, # by_t[:]
+ allow_exact_matches: bool = ...,
+ tolerance: np.number | float | None = ...,
+ use_hashtable: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
+def asof_join_nearest_on_X_by_Y(
+ left_values: np.ndarray, # asof_t[:]
+ right_values: np.ndarray, # asof_t[:]
+ left_by_values: np.ndarray, # by_t[:]
+ right_by_values: np.ndarray, # by_t[:]
+ allow_exact_matches: bool = ...,
+ tolerance: np.number | float | None = ...,
+ use_hashtable: bool = ...,
+) -> tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]]: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/json.pyi b/contrib/python/pandas/py3/pandas/_libs/json.pyi
new file mode 100644
index 00000000000..8e7ba60ccce
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/json.pyi
@@ -0,0 +1,23 @@
+from typing import (
+ Any,
+ Callable,
+)
+
+def dumps(
+ obj: Any,
+ ensure_ascii: bool = ...,
+ double_precision: int = ...,
+ indent: int = ...,
+ orient: str = ...,
+ date_unit: str = ...,
+ iso_dates: bool = ...,
+ default_handler: None
+ | Callable[[Any], str | float | bool | list | dict | None] = ...,
+) -> str: ...
+def loads(
+ s: str,
+ precise_float: bool = ...,
+ numpy: bool = ...,
+ dtype: None = ...,
+ labelled: bool = ...,
+) -> Any: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/lib.pyi b/contrib/python/pandas/py3/pandas/_libs/lib.pyi
new file mode 100644
index 00000000000..fbc577712d2
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/lib.pyi
@@ -0,0 +1,250 @@
+# TODO(npdtypes): Many types specified here can be made more specific/accurate;
+# the more specific versions are specified in comments
+
+from typing import (
+ Any,
+ Callable,
+ Final,
+ Generator,
+ Hashable,
+ Literal,
+ overload,
+)
+
+import numpy as np
+
+from pandas._typing import (
+ ArrayLike,
+ DtypeObj,
+ npt,
+)
+
+# placeholder until we can specify np.ndarray[object, ndim=2]
+ndarray_obj_2d = np.ndarray
+
+from enum import Enum
+
+class _NoDefault(Enum):
+ no_default = ...
+
+no_default: Final = _NoDefault.no_default
+NoDefault = Literal[_NoDefault.no_default]
+
+i8max: int
+u8max: int
+
+def item_from_zerodim(val: object) -> object: ...
+def infer_dtype(value: object, skipna: bool = ...) -> str: ...
+def is_iterator(obj: object) -> bool: ...
+def is_scalar(val: object) -> bool: ...
+def is_list_like(obj: object, allow_sets: bool = ...) -> bool: ...
+def is_period(val: object) -> bool: ...
+def is_interval(val: object) -> bool: ...
+def is_decimal(val: object) -> bool: ...
+def is_complex(val: object) -> bool: ...
+def is_bool(val: object) -> bool: ...
+def is_integer(val: object) -> bool: ...
+def is_float(val: object) -> bool: ...
+def is_interval_array(values: np.ndarray) -> bool: ...
+def is_datetime64_array(values: np.ndarray) -> bool: ...
+def is_timedelta_or_timedelta64_array(values: np.ndarray) -> bool: ...
+def is_datetime_with_singletz_array(values: np.ndarray) -> bool: ...
+def is_time_array(values: np.ndarray, skipna: bool = ...): ...
+def is_date_array(values: np.ndarray, skipna: bool = ...): ...
+def is_datetime_array(values: np.ndarray, skipna: bool = ...): ...
+def is_string_array(values: np.ndarray, skipna: bool = ...): ...
+def is_float_array(values: np.ndarray, skipna: bool = ...): ...
+def is_integer_array(values: np.ndarray, skipna: bool = ...): ...
+def is_bool_array(values: np.ndarray, skipna: bool = ...): ...
+def fast_multiget(mapping: dict, keys: np.ndarray, default=...) -> np.ndarray: ...
+def fast_unique_multiple_list_gen(gen: Generator, sort: bool = ...) -> list: ...
+def fast_unique_multiple_list(lists: list, sort: bool | None = ...) -> list: ...
+def map_infer(
+ arr: np.ndarray,
+ f: Callable[[Any], Any],
+ convert: bool = ...,
+ ignore_na: bool = ...,
+) -> np.ndarray: ...
+@overload # all convert_foo False -> only convert numeric
+def maybe_convert_objects(
+ objects: npt.NDArray[np.object_],
+ *,
+ try_float: bool = ...,
+ safe: bool = ...,
+ convert_numeric: bool = ...,
+ convert_datetime: Literal[False] = ...,
+ convert_timedelta: Literal[False] = ...,
+ convert_period: Literal[False] = ...,
+ convert_interval: Literal[False] = ...,
+ convert_to_nullable_dtype: Literal[False] = ...,
+ dtype_if_all_nat: DtypeObj | None = ...,
+) -> npt.NDArray[np.object_ | np.number]: ...
+@overload # both convert_datetime and convert_to_nullable_integer False -> np.ndarray
+def maybe_convert_objects(
+ objects: npt.NDArray[np.object_],
+ *,
+ try_float: bool = ...,
+ safe: bool = ...,
+ convert_numeric: bool = ...,
+ convert_datetime: Literal[False] = ...,
+ convert_timedelta: bool = ...,
+ convert_period: Literal[False] = ...,
+ convert_interval: Literal[False] = ...,
+ convert_to_nullable_dtype: Literal[False] = ...,
+ dtype_if_all_nat: DtypeObj | None = ...,
+) -> np.ndarray: ...
+@overload
+def maybe_convert_objects(
+ objects: npt.NDArray[np.object_],
+ *,
+ try_float: bool = ...,
+ safe: bool = ...,
+ convert_numeric: bool = ...,
+ convert_datetime: bool = ...,
+ convert_timedelta: bool = ...,
+ convert_period: bool = ...,
+ convert_interval: bool = ...,
+ convert_to_nullable_dtype: Literal[True] = ...,
+ dtype_if_all_nat: DtypeObj | None = ...,
+) -> ArrayLike: ...
+@overload
+def maybe_convert_objects(
+ objects: npt.NDArray[np.object_],
+ *,
+ try_float: bool = ...,
+ safe: bool = ...,
+ convert_numeric: bool = ...,
+ convert_datetime: Literal[True] = ...,
+ convert_timedelta: bool = ...,
+ convert_period: bool = ...,
+ convert_interval: bool = ...,
+ convert_to_nullable_dtype: bool = ...,
+ dtype_if_all_nat: DtypeObj | None = ...,
+) -> ArrayLike: ...
+@overload
+def maybe_convert_objects(
+ objects: npt.NDArray[np.object_],
+ *,
+ try_float: bool = ...,
+ safe: bool = ...,
+ convert_numeric: bool = ...,
+ convert_datetime: bool = ...,
+ convert_timedelta: bool = ...,
+ convert_period: Literal[True] = ...,
+ convert_interval: bool = ...,
+ convert_to_nullable_dtype: bool = ...,
+ dtype_if_all_nat: DtypeObj | None = ...,
+) -> ArrayLike: ...
+@overload
+def maybe_convert_objects(
+ objects: npt.NDArray[np.object_],
+ *,
+ try_float: bool = ...,
+ safe: bool = ...,
+ convert_numeric: bool = ...,
+ convert_datetime: bool = ...,
+ convert_timedelta: bool = ...,
+ convert_period: bool = ...,
+ convert_interval: bool = ...,
+ convert_to_nullable_dtype: bool = ...,
+ dtype_if_all_nat: DtypeObj | None = ...,
+) -> ArrayLike: ...
+@overload
+def maybe_convert_numeric(
+ values: npt.NDArray[np.object_],
+ na_values: set,
+ convert_empty: bool = ...,
+ coerce_numeric: bool = ...,
+ convert_to_masked_nullable: Literal[False] = ...,
+) -> tuple[np.ndarray, None]: ...
+@overload
+def maybe_convert_numeric(
+ values: npt.NDArray[np.object_],
+ na_values: set,
+ convert_empty: bool = ...,
+ coerce_numeric: bool = ...,
+ *,
+ convert_to_masked_nullable: Literal[True],
+) -> tuple[np.ndarray, np.ndarray]: ...
+
+# TODO: restrict `arr`?
+def ensure_string_array(
+ arr,
+ na_value: object = ...,
+ convert_na_value: bool = ...,
+ copy: bool = ...,
+ skipna: bool = ...,
+) -> npt.NDArray[np.object_]: ...
+def convert_nans_to_NA(
+ arr: npt.NDArray[np.object_],
+) -> npt.NDArray[np.object_]: ...
+def fast_zip(ndarrays: list) -> npt.NDArray[np.object_]: ...
+
+# TODO: can we be more specific about rows?
+def to_object_array_tuples(rows: object) -> ndarray_obj_2d: ...
+def tuples_to_object_array(
+ tuples: npt.NDArray[np.object_],
+) -> ndarray_obj_2d: ...
+
+# TODO: can we be more specific about rows?
+def to_object_array(rows: object, min_width: int = ...) -> ndarray_obj_2d: ...
+def dicts_to_array(dicts: list, columns: list) -> ndarray_obj_2d: ...
+def maybe_booleans_to_slice(
+ mask: npt.NDArray[np.uint8],
+) -> slice | npt.NDArray[np.uint8]: ...
+def maybe_indices_to_slice(
+ indices: npt.NDArray[np.intp],
+ max_len: int,
+) -> slice | npt.NDArray[np.intp]: ...
+def is_all_arraylike(obj: list) -> bool: ...
+
+# -----------------------------------------------------------------
+# Functions which in reality take memoryviews
+
+def memory_usage_of_objects(arr: np.ndarray) -> int: ... # object[:] # np.int64
+def map_infer_mask(
+ arr: np.ndarray,
+ f: Callable[[Any], Any],
+ mask: np.ndarray, # const uint8_t[:]
+ convert: bool = ...,
+ na_value: Any = ...,
+ dtype: np.dtype = ...,
+) -> np.ndarray: ...
+def indices_fast(
+ index: npt.NDArray[np.intp],
+ labels: np.ndarray, # const int64_t[:]
+ keys: list,
+ sorted_labels: list[npt.NDArray[np.int64]],
+) -> dict[Hashable, npt.NDArray[np.intp]]: ...
+def generate_slices(
+ labels: np.ndarray, ngroups: int # const intp_t[:]
+) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
+def count_level_2d(
+ mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True],
+ labels: np.ndarray, # const intp_t[:]
+ max_bin: int,
+) -> np.ndarray: ... # np.ndarray[np.int64, ndim=2]
+def get_level_sorter(
+ label: np.ndarray, # const int64_t[:]
+ starts: np.ndarray, # const intp_t[:]
+) -> np.ndarray: ... # np.ndarray[np.intp, ndim=1]
+def generate_bins_dt64(
+ values: npt.NDArray[np.int64],
+ binner: np.ndarray, # const int64_t[:]
+ closed: object = ...,
+ hasnans: bool = ...,
+) -> np.ndarray: ... # np.ndarray[np.int64, ndim=1]
+def array_equivalent_object(
+ left: npt.NDArray[np.object_],
+ right: npt.NDArray[np.object_],
+) -> bool: ...
+def has_infs(arr: np.ndarray) -> bool: ... # const floating[:]
+def get_reverse_indexer(
+ indexer: np.ndarray, # const intp_t[:]
+ length: int,
+) -> npt.NDArray[np.intp]: ...
+def is_bool_list(obj: list) -> bool: ...
+def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
+def is_range_indexer(
+ left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1]
+) -> bool: ...
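A minimal usage sketch of two of the lib helpers stubbed above (pandas._libs.lib is a private module, so this is illustrative only and return values may vary between releases):

import numpy as np
from pandas._libs import lib

# Contiguous, in-bounds indices can be collapsed into a slice.
idx = np.arange(3, 9, dtype=np.intp)
print(lib.maybe_indices_to_slice(idx, 20))   # e.g. slice(3, 9, 1)

# is_bool_list only accepts real bools, not ints that happen to be 0/1.
print(lib.is_bool_list([True, False]))       # True
print(lib.is_bool_list([True, 1]))           # False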
diff --git a/contrib/python/pandas/py3/pandas/_libs/missing.pyi b/contrib/python/pandas/py3/pandas/_libs/missing.pyi
new file mode 100644
index 00000000000..d5c9f1342a0
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/missing.pyi
@@ -0,0 +1,17 @@
+import numpy as np
+from numpy import typing as npt
+
+class NAType:
+ def __new__(cls, *args, **kwargs): ...
+
+NA: NAType
+
+def is_matching_na(
+ left: object, right: object, nan_matches_none: bool = ...
+) -> bool: ...
+def isposinf_scalar(val: object) -> bool: ...
+def isneginf_scalar(val: object) -> bool: ...
+def checknull(val: object, inf_as_na: bool = ...) -> bool: ...
+def isnaobj(arr: np.ndarray, inf_as_na: bool = ...) -> npt.NDArray[np.bool_]: ...
+def is_numeric_na(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
+def is_float_nan(values: np.ndarray) -> npt.NDArray[np.bool_]: ...
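A short sketch of how these private missing-value helpers behave (pandas._libs.missing is not public API; expected output is shown as comments):

import numpy as np
from pandas._libs import missing as libmissing

print(libmissing.checknull(None), libmissing.checknull(np.nan))   # True True
# is_matching_na distinguishes kinds of NA: NaN matches NaN, but not pd.NA.
print(libmissing.is_matching_na(np.nan, np.nan))                  # True
print(libmissing.is_matching_na(np.nan, libmissing.NA))           # False
arr = np.array([1.0, None, np.nan], dtype=object)
print(libmissing.isnaobj(arr))                                    # [False  True  True]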
diff --git a/contrib/python/pandas/py3/pandas/_libs/ops.pyi b/contrib/python/pandas/py3/pandas/_libs/ops.pyi
new file mode 100644
index 00000000000..74a6ad87cd2
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/ops.pyi
@@ -0,0 +1,50 @@
+from typing import (
+ Any,
+ Callable,
+ Iterable,
+ Literal,
+ overload,
+)
+
+import numpy as np
+
+from pandas._typing import npt
+
+_BinOp = Callable[[Any, Any], Any]
+_BoolOp = Callable[[Any, Any], bool]
+
+def scalar_compare(
+ values: np.ndarray, # object[:]
+ val: object,
+ op: _BoolOp, # {operator.eq, operator.ne, ...}
+) -> npt.NDArray[np.bool_]: ...
+def vec_compare(
+ left: npt.NDArray[np.object_],
+ right: npt.NDArray[np.object_],
+ op: _BoolOp, # {operator.eq, operator.ne, ...}
+) -> npt.NDArray[np.bool_]: ...
+def scalar_binop(
+ values: np.ndarray, # object[:]
+ val: object,
+ op: _BinOp, # binary operator
+) -> np.ndarray: ...
+def vec_binop(
+ left: np.ndarray, # object[:]
+ right: np.ndarray, # object[:]
+ op: _BinOp, # binary operator
+) -> np.ndarray: ...
+@overload
+def maybe_convert_bool(
+ arr: npt.NDArray[np.object_],
+ true_values: Iterable = ...,
+ false_values: Iterable = ...,
+ convert_to_masked_nullable: Literal[False] = ...,
+) -> tuple[np.ndarray, None]: ...
+@overload
+def maybe_convert_bool(
+ arr: npt.NDArray[np.object_],
+ true_values: Iterable = ...,
+ false_values: Iterable = ...,
+ *,
+ convert_to_masked_nullable: Literal[True],
+) -> tuple[np.ndarray, np.ndarray]: ...
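A small sketch of the ops helpers above; the Literal overloads of maybe_convert_bool let a type checker infer that the second tuple element is None unless convert_to_masked_nullable=True is passed (private API, shown only for illustration):

import operator
import numpy as np
from pandas._libs import ops as libops

values = np.array(["a", "b", "c"], dtype=object)
print(libops.scalar_compare(values, "b", operator.eq))   # [False  True False]

bools = np.array([True, "no", False], dtype=object)
result, mask = libops.maybe_convert_bool(bools, false_values={"no"})
print(result, mask)   # [ True False False] None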
diff --git a/contrib/python/pandas/py3/pandas/_libs/ops_dispatch.pyi b/contrib/python/pandas/py3/pandas/_libs/ops_dispatch.pyi
new file mode 100644
index 00000000000..91b5a4dbaae
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/ops_dispatch.pyi
@@ -0,0 +1,5 @@
+import numpy as np
+
+def maybe_dispatch_ufunc_to_dunder_op(
+ self, ufunc: np.ufunc, method: str, *inputs, **kwargs
+): ...
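maybe_dispatch_ufunc_to_dunder_op is the hook pandas uses to reroute NumPy ufunc calls back onto an object's arithmetic dunders. A rough sketch with a hypothetical array-like class (not a pandas class):

import numpy as np
from pandas._libs.ops_dispatch import maybe_dispatch_ufunc_to_dunder_op

class MyArray:
    def __init__(self, data):
        self.data = np.asarray(data)
    def __add__(self, other):
        return MyArray(self.data + other)
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # e.g. np.add(self, 1) is rerouted to self.__add__(1) when possible.
        return maybe_dispatch_ufunc_to_dunder_op(self, ufunc, method, *inputs, **kwargs)

print(np.add(MyArray([1, 2, 3]), 1).data)   # [2 3 4]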
diff --git a/contrib/python/pandas/py3/pandas/_libs/parsers.pyi b/contrib/python/pandas/py3/pandas/_libs/parsers.pyi
new file mode 100644
index 00000000000..ec5244469cf
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/parsers.pyi
@@ -0,0 +1,75 @@
+from typing import (
+ Hashable,
+ Literal,
+)
+
+import numpy as np
+
+from pandas._typing import (
+ ArrayLike,
+ Dtype,
+ npt,
+)
+
+STR_NA_VALUES: set[str]
+
+def sanitize_objects(
+ values: npt.NDArray[np.object_],
+ na_values: set,
+) -> int: ...
+
+class TextReader:
+ unnamed_cols: set[str]
+ table_width: int # int64_t
+ leading_cols: int # int64_t
+ header: list[list[int]] # non-negative integers
+ def __init__(
+ self,
+ source,
+ delimiter: bytes | str = ..., # single-character only
+ header=...,
+ header_start: int = ..., # int64_t
+ header_end: int = ..., # uint64_t
+ index_col=...,
+ names=...,
+ tokenize_chunksize: int = ..., # int64_t
+ delim_whitespace: bool = ...,
+ converters=...,
+ skipinitialspace: bool = ...,
+ escapechar: bytes | str | None = ..., # single-character only
+ doublequote: bool = ...,
+ quotechar: str | bytes | None = ..., # at most 1 character
+ quoting: int = ...,
+ lineterminator: bytes | str | None = ..., # at most 1 character
+ comment=...,
+ decimal: bytes | str = ..., # single-character only
+ thousands: bytes | str | None = ..., # single-character only
+ dtype: Dtype | dict[Hashable, Dtype] = ...,
+ usecols=...,
+ error_bad_lines: bool = ...,
+ warn_bad_lines: bool = ...,
+ na_filter: bool = ...,
+ na_values=...,
+ na_fvalues=...,
+ keep_default_na: bool = ...,
+ true_values=...,
+ false_values=...,
+ allow_leading_cols: bool = ...,
+ skiprows=...,
+ skipfooter: int = ..., # int64_t
+ verbose: bool = ...,
+ float_precision: Literal["round_trip", "legacy", "high"] | None = ...,
+ skip_blank_lines: bool = ...,
+ encoding_errors: bytes | str = ...,
+ ) -> None: ...
+ def set_noconvert(self, i: int) -> None: ...
+ def remove_noconvert(self, i: int) -> None: ...
+ def close(self) -> None: ...
+ def read(self, rows: int | None = ...) -> dict[int, ArrayLike]: ...
+ def read_low_memory(self, rows: int | None) -> list[dict[int, ArrayLike]]: ...
+
+# _maybe_upcast, na_values are only exposed for testing
+
+def _maybe_upcast(
+ arr, use_dtype_backend: bool = ..., dtype_backend: str = ...
+) -> np.ndarray: ...
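TextReader is the C engine behind read_csv; the sketch below drives it directly, mirroring how pandas' own parser tests use it (private API, so only illustrative):

from io import StringIO
from pandas._libs.parsers import TextReader

reader = TextReader(StringIO("1,2\n3,4\n"), header=None)
chunk = reader.read()   # dict mapping column position -> column values
print(chunk)            # roughly {0: array([1, 3]), 1: array([2, 4])}
reader.close()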
diff --git a/contrib/python/pandas/py3/pandas/_libs/properties.pyi b/contrib/python/pandas/py3/pandas/_libs/properties.pyi
new file mode 100644
index 00000000000..aaa44a0cf47
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/properties.pyi
@@ -0,0 +1,27 @@
+from typing import (
+ Sequence,
+ overload,
+)
+
+from pandas._typing import (
+ AnyArrayLike,
+ DataFrame,
+ Index,
+ Series,
+)
+
+# note: this is a lie to make type checkers happy (they special
+# case property). cache_readonly uses attribute names similar to
+# property (fget) but it does not provide fset and fdel.
+cache_readonly = property
+
+class AxisProperty:
+ axis: int
+ def __init__(self, axis: int = ..., doc: str = ...) -> None: ...
+ @overload
+ def __get__(self, obj: DataFrame | Series, type) -> Index: ...
+ @overload
+ def __get__(self, obj: None, type) -> AxisProperty: ...
+ def __set__(
+ self, obj: DataFrame | Series, value: AnyArrayLike | Sequence
+ ) -> None: ...
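cache_readonly is pandas' cached, read-only property; the stub equates it with property only so that type checkers treat decorated attributes like properties. A sketch with a hypothetical class:

from pandas._libs.properties import cache_readonly

class Expensive:
    calls = 0

    @cache_readonly
    def answer(self) -> int:
        type(self).calls += 1
        return 42

obj = Expensive()
print(obj.answer, obj.answer, Expensive.calls)   # 42 42 1 -- computed once, then cached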
diff --git a/contrib/python/pandas/py3/pandas/_libs/reduction.pyi b/contrib/python/pandas/py3/pandas/_libs/reduction.pyi
new file mode 100644
index 00000000000..525546f26c8
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/reduction.pyi
@@ -0,0 +1,6 @@
+from typing import Any
+
+from pandas._typing import DtypeObj
+
+def check_result_array(obj: object, dtype: DtypeObj) -> None: ...
+def extract_result(res: object) -> Any: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/reshape.pyi b/contrib/python/pandas/py3/pandas/_libs/reshape.pyi
new file mode 100644
index 00000000000..110687fcd0c
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/reshape.pyi
@@ -0,0 +1,16 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def unstack(
+ values: np.ndarray, # reshape_t[:, :]
+ mask: np.ndarray, # const uint8_t[:]
+ stride: int,
+ length: int,
+ width: int,
+ new_values: np.ndarray, # reshape_t[:, :]
+ new_mask: np.ndarray, # uint8_t[:, :]
+) -> None: ...
+def explode(
+ values: npt.NDArray[np.object_],
+) -> tuple[npt.NDArray[np.object_], npt.NDArray[np.int64]]: ...
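explode is the kernel behind Series.explode: it flattens an object array of list-likes and reports how many output rows each input row produced (empty list-likes yield a single NaN). A minimal sketch:

import numpy as np
from pandas._libs.reshape import explode

values = np.array([[1, 2], [], [3]], dtype=object)
flat, counts = explode(values)
print(flat)     # [1 2 nan 3]
print(counts)   # [2 1 1]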
diff --git a/contrib/python/pandas/py3/pandas/_libs/sparse.pyi b/contrib/python/pandas/py3/pandas/_libs/sparse.pyi
new file mode 100644
index 00000000000..8c3989b818a
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/sparse.pyi
@@ -0,0 +1,49 @@
+from typing import (
+ Sequence,
+ TypeVar,
+)
+
+import numpy as np
+
+from pandas._typing import npt
+
+_SparseIndexT = TypeVar("_SparseIndexT", bound=SparseIndex)
+
+class SparseIndex:
+ length: int
+ npoints: int
+ def __init__(self) -> None: ...
+ @property
+ def ngaps(self) -> int: ...
+ @property
+ def nbytes(self) -> int: ...
+ @property
+ def indices(self) -> npt.NDArray[np.int32]: ...
+ def equals(self, other) -> bool: ...
+ def lookup(self, index: int) -> np.int32: ...
+ def lookup_array(self, indexer: npt.NDArray[np.int32]) -> npt.NDArray[np.int32]: ...
+ def to_int_index(self) -> IntIndex: ...
+ def to_block_index(self) -> BlockIndex: ...
+ def intersect(self: _SparseIndexT, y_: SparseIndex) -> _SparseIndexT: ...
+ def make_union(self: _SparseIndexT, y_: SparseIndex) -> _SparseIndexT: ...
+
+class IntIndex(SparseIndex):
+ indices: npt.NDArray[np.int32]
+ def __init__(
+ self, length: int, indices: Sequence[int], check_integrity: bool = ...
+ ) -> None: ...
+
+class BlockIndex(SparseIndex):
+ nblocks: int
+ blocs: np.ndarray
+ blengths: np.ndarray
+ def __init__(
+ self, length: int, blocs: np.ndarray, blengths: np.ndarray
+ ) -> None: ...
+
+def make_mask_object_ndarray(
+ arr: npt.NDArray[np.object_], fill_value
+) -> npt.NDArray[np.bool_]: ...
+def get_blocks(
+ indices: npt.NDArray[np.int32],
+) -> tuple[npt.NDArray[np.int32], npt.NDArray[np.int32]]: ...
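The sparse index classes record which positions of a fixed-length array hold actual values: IntIndex stores the positions explicitly, BlockIndex as (start, length) runs. A small sketch:

from pandas._libs.sparse import IntIndex

idx = IntIndex(10, [2, 3, 4, 8])
print(idx.npoints, idx.ngaps)        # 4 6
block = idx.to_block_index()
print(block.blocs, block.blengths)   # [2 8] [3 1]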
diff --git a/contrib/python/pandas/py3/pandas/_libs/testing.pyi b/contrib/python/pandas/py3/pandas/_libs/testing.pyi
new file mode 100644
index 00000000000..01da496975f
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/testing.pyi
@@ -0,0 +1,12 @@
+def assert_dict_equal(a, b, compare_keys: bool = ...): ...
+def assert_almost_equal(
+ a,
+ b,
+ rtol: float = ...,
+ atol: float = ...,
+ check_dtype: bool = ...,
+ obj=...,
+ lobj=...,
+ robj=...,
+ index_values=...,
+): ...
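These are the Cython assertion helpers used by pandas' own test suite; a quick sketch:

from pandas._libs.testing import assert_almost_equal, assert_dict_equal

assert_dict_equal({"a": 1}, {"a": 1})
assert_almost_equal(1.000000001, 1.0)   # passes within the default tolerances
try:
    assert_almost_equal(1.0, 1.5)
except AssertionError as exc:
    print("mismatch reported:", exc)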
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslib.pyi b/contrib/python/pandas/py3/pandas/_libs/tslib.pyi
new file mode 100644
index 00000000000..9819b5173db
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslib.pyi
@@ -0,0 +1,32 @@
+from datetime import tzinfo
+
+import numpy as np
+
+from pandas._typing import npt
+
+def format_array_from_datetime(
+ values: npt.NDArray[np.int64],
+ tz: tzinfo | None = ...,
+ format: str | None = ...,
+ na_rep: str | float = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.object_]: ...
+def array_with_unit_to_datetime(
+ values: npt.NDArray[np.object_],
+ unit: str,
+ errors: str = ...,
+) -> tuple[np.ndarray, tzinfo | None]: ...
+def first_non_null(values: np.ndarray) -> int: ...
+def array_to_datetime(
+ values: npt.NDArray[np.object_],
+ errors: str = ...,
+ dayfirst: bool = ...,
+ yearfirst: bool = ...,
+ utc: bool = ...,
+) -> tuple[np.ndarray, tzinfo | None]: ...
+
+# returned ndarray may be object dtype or datetime64[ns]
+
+def array_to_datetime_with_tz(
+ values: npt.NDArray[np.object_], tz: tzinfo
+) -> npt.NDArray[np.int64]: ...
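array_to_datetime is one of the private workhorses behind pd.to_datetime; a hedged sketch of the happy path:

import numpy as np
from pandas._libs import tslib

values = np.array(["2024-01-01", "2024-01-02", None], dtype=object)
result, tz = tslib.array_to_datetime(values)
print(result.dtype, tz)   # datetime64[ns] None -- the None entry becomes NaT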
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/ccalendar.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/ccalendar.pyi
new file mode 100644
index 00000000000..993f18a61d7
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/ccalendar.pyi
@@ -0,0 +1,12 @@
+DAYS: list[str]
+MONTH_ALIASES: dict[int, str]
+MONTH_NUMBERS: dict[str, int]
+MONTHS: list[str]
+int_to_weekday: dict[int, str]
+
+def get_firstbday(year: int, month: int) -> int: ...
+def get_lastbday(year: int, month: int) -> int: ...
+def get_day_of_year(year: int, month: int, day: int) -> int: ...
+def get_iso_calendar(year: int, month: int, day: int) -> tuple[int, int, int]: ...
+def get_week_of_year(year: int, month: int, day: int) -> int: ...
+def get_days_in_month(year: int, month: int) -> int: ...
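A quick sketch of the calendar helpers above:

from pandas._libs.tslibs.ccalendar import get_days_in_month, get_firstbday, get_week_of_year

print(get_days_in_month(2024, 2))    # 29 -- leap year
print(get_firstbday(2024, 6))        # 3  -- June 1, 2024 is a Saturday
print(get_week_of_year(2024, 1, 4))  # 1  -- January 4 is always in ISO week 1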
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/conversion.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/conversion.pyi
new file mode 100644
index 00000000000..d564d767f7f
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/conversion.pyi
@@ -0,0 +1,14 @@
+from datetime import (
+ datetime,
+ tzinfo,
+)
+
+import numpy as np
+
+DT64NS_DTYPE: np.dtype
+TD64NS_DTYPE: np.dtype
+
+def precision_from_unit(
+ unit: str,
+) -> tuple[int, int]: ... # (int64_t, _)
+def localize_pydatetime(dt: datetime, tz: tzinfo | None) -> datetime: ...
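localize_pydatetime attaches a timezone to a naive datetime the way Timestamp.tz_localize does for scalars; a minimal sketch:

from datetime import datetime, timezone
from pandas._libs.tslibs.conversion import localize_pydatetime

naive = datetime(2024, 3, 4, 21, 16)
print(localize_pydatetime(naive, timezone.utc))   # 2024-03-04 21:16:00+00:00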
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/dtypes.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/dtypes.pyi
new file mode 100644
index 00000000000..b872241d79a
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/dtypes.pyi
@@ -0,0 +1,81 @@
+from enum import Enum
+
+# These are not public API, but are exposed in the .pyi file because they
+# are imported in tests.
+_attrname_to_abbrevs: dict[str, str]
+_period_code_map: dict[str, int]
+
+def periods_per_day(reso: int) -> int: ...
+def periods_per_second(reso: int) -> int: ...
+def is_supported_unit(reso: int) -> bool: ...
+def npy_unit_to_abbrev(reso: int) -> str: ...
+def get_supported_reso(reso: int) -> int: ...
+def abbrev_to_npy_unit(abbrev: str) -> int: ...
+
+class PeriodDtypeBase:
+ _dtype_code: int # PeriodDtypeCode
+
+ # actually __cinit__
+ def __new__(cls, code: int): ...
+ @property
+ def _freq_group_code(self) -> int: ...
+ @property
+ def _resolution_obj(self) -> Resolution: ...
+ def _get_to_timestamp_base(self) -> int: ...
+ @property
+ def _freqstr(self) -> str: ...
+
+class FreqGroup(Enum):
+ FR_ANN: int
+ FR_QTR: int
+ FR_MTH: int
+ FR_WK: int
+ FR_BUS: int
+ FR_DAY: int
+ FR_HR: int
+ FR_MIN: int
+ FR_SEC: int
+ FR_MS: int
+ FR_US: int
+ FR_NS: int
+ FR_UND: int
+ @staticmethod
+ def from_period_dtype_code(code: int) -> FreqGroup: ...
+
+class Resolution(Enum):
+ RESO_NS: int
+ RESO_US: int
+ RESO_MS: int
+ RESO_SEC: int
+ RESO_MIN: int
+ RESO_HR: int
+ RESO_DAY: int
+ RESO_MTH: int
+ RESO_QTR: int
+ RESO_YR: int
+ def __lt__(self, other: Resolution) -> bool: ...
+ def __ge__(self, other: Resolution) -> bool: ...
+ @property
+ def attrname(self) -> str: ...
+ @classmethod
+ def from_attrname(cls, attrname: str) -> Resolution: ...
+ @classmethod
+ def get_reso_from_freqstr(cls, freq: str) -> Resolution: ...
+ @property
+ def attr_abbrev(self) -> str: ...
+
+class NpyDatetimeUnit(Enum):
+ NPY_FR_Y: int
+ NPY_FR_M: int
+ NPY_FR_W: int
+ NPY_FR_D: int
+ NPY_FR_h: int
+ NPY_FR_m: int
+ NPY_FR_s: int
+ NPY_FR_ms: int
+ NPY_FR_us: int
+ NPY_FR_ns: int
+ NPY_FR_ps: int
+ NPY_FR_fs: int
+ NPY_FR_as: int
+ NPY_FR_GENERIC: int
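Resolution values are ordered from finest (nanoseconds) to coarsest (years), and attrname/from_attrname round-trip through the string names pandas uses elsewhere. A short sketch:

from pandas._libs.tslibs.dtypes import Resolution

print(Resolution.RESO_SEC < Resolution.RESO_DAY)                     # True -- finer sorts first
print(Resolution.RESO_HR.attrname)                                   # hour
print(Resolution.from_attrname("hour") is Resolution.RESO_HR)        # True
print(Resolution.get_reso_from_freqstr("D") is Resolution.RESO_DAY)  # True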
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/fields.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/fields.pyi
new file mode 100644
index 00000000000..c6cfd44e9f6
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/fields.pyi
@@ -0,0 +1,62 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def build_field_sarray(
+ dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ reso: int, # NPY_DATETIMEUNIT
+) -> np.ndarray: ...
+def month_position_check(fields, weekdays) -> str | None: ...
+def get_date_name_field(
+ dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ field: str,
+ locale: str | None = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.object_]: ...
+def get_start_end_field(
+ dtindex: npt.NDArray[np.int64],
+ field: str,
+ freqstr: str | None = ...,
+ month_kw: int = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.bool_]: ...
+def get_date_field(
+ dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ field: str,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int32]: ...
+def get_timedelta_field(
+ tdindex: npt.NDArray[np.int64], # const int64_t[:]
+ field: str,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int32]: ...
+def get_timedelta_days(
+ tdindex: npt.NDArray[np.int64], # const int64_t[:]
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def isleapyear_arr(
+ years: np.ndarray,
+) -> npt.NDArray[np.bool_]: ...
+def build_isocalendar_sarray(
+ dtindex: npt.NDArray[np.int64], # const int64_t[:]
+ reso: int, # NPY_DATETIMEUNIT
+) -> np.ndarray: ...
+def _get_locale_names(name_type: str, locale: str | None = ...): ...
+
+class RoundTo:
+ @property
+ def MINUS_INFTY(self) -> int: ...
+ @property
+ def PLUS_INFTY(self) -> int: ...
+ @property
+ def NEAREST_HALF_EVEN(self) -> int: ...
+ @property
+ def NEAREST_HALF_PLUS_INFTY(self) -> int: ...
+ @property
+ def NEAREST_HALF_MINUS_INFTY(self) -> int: ...
+
+def round_nsint64(
+ values: npt.NDArray[np.int64],
+ mode: RoundTo,
+ nanos: int,
+) -> npt.NDArray[np.int64]: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/nattype.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/nattype.pyi
new file mode 100644
index 00000000000..04f89437104
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/nattype.pyi
@@ -0,0 +1,132 @@
+from datetime import (
+ datetime,
+ timedelta,
+ tzinfo as _tzinfo,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs.period import Period
+
+NaT: NaTType
+iNaT: int
+nat_strings: set[str]
+
+_NaTComparisonTypes = datetime | timedelta | Period | np.datetime64 | np.timedelta64
+
+class _NatComparison:
+ def __call__(self, other: _NaTComparisonTypes) -> bool: ...
+
+class NaTType:
+ _value: np.int64
+ @property
+ def value(self) -> int: ...
+ @property
+ def asm8(self) -> np.datetime64: ...
+ def to_datetime64(self) -> np.datetime64: ...
+ def to_numpy(
+ self, dtype: np.dtype | str | None = ..., copy: bool = ...
+ ) -> np.datetime64 | np.timedelta64: ...
+ @property
+ def is_leap_year(self) -> bool: ...
+ @property
+ def is_month_start(self) -> bool: ...
+ @property
+ def is_quarter_start(self) -> bool: ...
+ @property
+ def is_year_start(self) -> bool: ...
+ @property
+ def is_month_end(self) -> bool: ...
+ @property
+ def is_quarter_end(self) -> bool: ...
+ @property
+ def is_year_end(self) -> bool: ...
+ @property
+ def day_of_year(self) -> float: ...
+ @property
+ def dayofyear(self) -> float: ...
+ @property
+ def days_in_month(self) -> float: ...
+ @property
+ def daysinmonth(self) -> float: ...
+ @property
+ def day_of_week(self) -> float: ...
+ @property
+ def dayofweek(self) -> float: ...
+ @property
+ def week(self) -> float: ...
+ @property
+ def weekofyear(self) -> float: ...
+ def day_name(self) -> float: ...
+ def month_name(self) -> float: ...
+ def weekday(self) -> float: ...
+ def isoweekday(self) -> float: ...
+ def total_seconds(self) -> float: ...
+ def today(self, *args, **kwargs) -> NaTType: ...
+ def now(self, *args, **kwargs) -> NaTType: ...
+ def to_pydatetime(self) -> NaTType: ...
+ def date(self) -> NaTType: ...
+ def round(self) -> NaTType: ...
+ def floor(self) -> NaTType: ...
+ def ceil(self) -> NaTType: ...
+ @property
+ def tzinfo(self) -> None: ...
+ @property
+ def tz(self) -> None: ...
+ def tz_convert(self, tz: _tzinfo | str | None) -> NaTType: ...
+ def tz_localize(
+ self,
+ tz: _tzinfo | str | None,
+ ambiguous: str = ...,
+ nonexistent: str = ...,
+ ) -> NaTType: ...
+ def replace(
+ self,
+ year: int | None = ...,
+ month: int | None = ...,
+ day: int | None = ...,
+ hour: int | None = ...,
+ minute: int | None = ...,
+ second: int | None = ...,
+ microsecond: int | None = ...,
+ nanosecond: int | None = ...,
+ tzinfo: _tzinfo | None = ...,
+ fold: int | None = ...,
+ ) -> NaTType: ...
+ @property
+ def year(self) -> float: ...
+ @property
+ def quarter(self) -> float: ...
+ @property
+ def month(self) -> float: ...
+ @property
+ def day(self) -> float: ...
+ @property
+ def hour(self) -> float: ...
+ @property
+ def minute(self) -> float: ...
+ @property
+ def second(self) -> float: ...
+ @property
+ def millisecond(self) -> float: ...
+ @property
+ def microsecond(self) -> float: ...
+ @property
+ def nanosecond(self) -> float: ...
+ # inject Timedelta properties
+ @property
+ def days(self) -> float: ...
+ @property
+ def microseconds(self) -> float: ...
+ @property
+ def nanoseconds(self) -> float: ...
+ # inject Period properties
+ @property
+ def qyear(self) -> float: ...
+ def __eq__(self, other: object) -> bool: ...
+ def __ne__(self, other: object) -> bool: ...
+ __lt__: _NatComparison
+ __le__: _NatComparison
+ __gt__: _NatComparison
+ __ge__: _NatComparison
+ def as_unit(self, unit: str, round_ok: bool = ...) -> NaTType: ...
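The stub above captures NaT's permissive surface: ordering comparisons with real datetimes are always False, and the numeric accessors come back as NaN. For example:

import pandas as pd

print(pd.NaT < pd.Timestamp("2024-03-04"))   # False
print(pd.NaT > pd.Timestamp("2024-03-04"))   # False
print(pd.NaT.year)                           # nan
print(repr(pd.NaT.to_datetime64()))          # numpy.datetime64('NaT')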
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/np_datetime.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/np_datetime.pyi
new file mode 100644
index 00000000000..0cb0e3b0237
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/np_datetime.pyi
@@ -0,0 +1,21 @@
+import numpy as np
+
+from pandas._typing import npt
+
+class OutOfBoundsDatetime(ValueError): ...
+class OutOfBoundsTimedelta(ValueError): ...
+
+# only exposed for testing
+def py_get_unit_from_dtype(dtype: np.dtype): ...
+def py_td64_to_tdstruct(td64: int, unit: int) -> dict: ...
+def astype_overflowsafe(
+ arr: np.ndarray,
+ dtype: np.dtype,
+ copy: bool = ...,
+ round_ok: bool = ...,
+ is_coerce: bool = ...,
+) -> np.ndarray: ...
+def is_unitless(dtype: np.dtype) -> bool: ...
+def compare_mismatched_resolutions(
+ left: np.ndarray, right: np.ndarray, op
+) -> npt.NDArray[np.bool_]: ...
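astype_overflowsafe casts between datetime64/timedelta64 resolutions and raises instead of silently wrapping when a value does not fit; a hedged sketch:

import numpy as np
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime, astype_overflowsafe

arr = np.array(["2262-04-12"], dtype="M8[s]")   # past the datetime64[ns] upper bound
try:
    astype_overflowsafe(arr, np.dtype("M8[ns]"))
except OutOfBoundsDatetime as exc:
    print("overflow caught:", exc)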
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/offsets.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/offsets.pyi
new file mode 100644
index 00000000000..f1aca471766
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/offsets.pyi
@@ -0,0 +1,279 @@
+from datetime import (
+ datetime,
+ timedelta,
+)
+from typing import (
+ Any,
+ Collection,
+ Literal,
+ TypeVar,
+ overload,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs.nattype import NaTType
+from pandas._typing import npt
+
+from .timedeltas import Timedelta
+
+_BaseOffsetT = TypeVar("_BaseOffsetT", bound=BaseOffset)
+_DatetimeT = TypeVar("_DatetimeT", bound=datetime)
+_TimedeltaT = TypeVar("_TimedeltaT", bound=timedelta)
+
+_relativedelta_kwds: set[str]
+prefix_mapping: dict[str, type]
+
+class ApplyTypeError(TypeError): ...
+
+class BaseOffset:
+ n: int
+ def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
+ def __eq__(self, other) -> bool: ...
+ def __ne__(self, other) -> bool: ...
+ def __hash__(self) -> int: ...
+ @property
+ def kwds(self) -> dict: ...
+ @property
+ def base(self) -> BaseOffset: ...
+ @overload
+ def __add__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
+ @overload
+ def __add__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ...
+ @overload
+ def __add__(self, other: _DatetimeT) -> _DatetimeT: ...
+ @overload
+ def __add__(self, other: _TimedeltaT) -> _TimedeltaT: ...
+ @overload
+ def __radd__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
+ @overload
+ def __radd__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ...
+ @overload
+ def __radd__(self, other: _DatetimeT) -> _DatetimeT: ...
+ @overload
+ def __radd__(self, other: _TimedeltaT) -> _TimedeltaT: ...
+ @overload
+ def __radd__(self, other: NaTType) -> NaTType: ...
+ def __sub__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ...
+ @overload
+ def __rsub__(self, other: npt.NDArray[np.object_]) -> npt.NDArray[np.object_]: ...
+ @overload
+ def __rsub__(self: _BaseOffsetT, other: BaseOffset) -> _BaseOffsetT: ...
+ @overload
+ def __rsub__(self, other: _DatetimeT) -> _DatetimeT: ...
+ @overload
+ def __rsub__(self, other: _TimedeltaT) -> _TimedeltaT: ...
+ @overload
+ def __mul__(self, other: np.ndarray) -> np.ndarray: ...
+ @overload
+ def __mul__(self: _BaseOffsetT, other: int) -> _BaseOffsetT: ...
+ @overload
+ def __rmul__(self, other: np.ndarray) -> np.ndarray: ...
+ @overload
+ def __rmul__(self: _BaseOffsetT, other: int) -> _BaseOffsetT: ...
+ def __neg__(self: _BaseOffsetT) -> _BaseOffsetT: ...
+ def copy(self: _BaseOffsetT) -> _BaseOffsetT: ...
+ @property
+ def name(self) -> str: ...
+ @property
+ def rule_code(self) -> str: ...
+ @property
+ def freqstr(self) -> str: ...
+ def _apply(self, other): ...
+ def _apply_array(self, dtarr) -> None: ...
+ def rollback(self, dt: datetime) -> datetime: ...
+ def rollforward(self, dt: datetime) -> datetime: ...
+ def is_on_offset(self, dt: datetime) -> bool: ...
+ def __setstate__(self, state) -> None: ...
+ def __getstate__(self): ...
+ @property
+ def nanos(self) -> int: ...
+ def is_anchored(self) -> bool: ...
+
+def _get_offset(name: str) -> BaseOffset: ...
+
+class SingleConstructorOffset(BaseOffset):
+ @classmethod
+ def _from_name(cls, suffix: None = ...): ...
+ def __reduce__(self): ...
+
+@overload
+def to_offset(freq: None) -> None: ...
+@overload
+def to_offset(freq: _BaseOffsetT) -> _BaseOffsetT: ...
+@overload
+def to_offset(freq: timedelta | str) -> BaseOffset: ...
+
+class Tick(SingleConstructorOffset):
+ _creso: int
+ _prefix: str
+ _td64_unit: str
+ def __init__(self, n: int = ..., normalize: bool = ...) -> None: ...
+ @property
+ def delta(self) -> Timedelta: ...
+ @property
+ def nanos(self) -> int: ...
+
+def delta_to_tick(delta: timedelta) -> Tick: ...
+
+class Day(Tick): ...
+class Hour(Tick): ...
+class Minute(Tick): ...
+class Second(Tick): ...
+class Milli(Tick): ...
+class Micro(Tick): ...
+class Nano(Tick): ...
+
+class RelativeDeltaOffset(BaseOffset):
+ def __init__(self, n: int = ..., normalize: bool = ..., **kwds: Any) -> None: ...
+
+class BusinessMixin(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., offset: timedelta = ...
+ ) -> None: ...
+
+class BusinessDay(BusinessMixin): ...
+
+class BusinessHour(BusinessMixin):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ start: str | Collection[str] = ...,
+ end: str | Collection[str] = ...,
+ offset: timedelta = ...,
+ ) -> None: ...
+
+class WeekOfMonthMixin(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., weekday: int = ...
+ ) -> None: ...
+
+class YearOffset(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., month: int | None = ...
+ ) -> None: ...
+
+class BYearEnd(YearOffset): ...
+class BYearBegin(YearOffset): ...
+class YearEnd(YearOffset): ...
+class YearBegin(YearOffset): ...
+
+class QuarterOffset(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., startingMonth: int | None = ...
+ ) -> None: ...
+
+class BQuarterEnd(QuarterOffset): ...
+class BQuarterBegin(QuarterOffset): ...
+class QuarterEnd(QuarterOffset): ...
+class QuarterBegin(QuarterOffset): ...
+class MonthOffset(SingleConstructorOffset): ...
+class MonthEnd(MonthOffset): ...
+class MonthBegin(MonthOffset): ...
+class BusinessMonthEnd(MonthOffset): ...
+class BusinessMonthBegin(MonthOffset): ...
+
+class SemiMonthOffset(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., day_of_month: int | None = ...
+ ) -> None: ...
+
+class SemiMonthEnd(SemiMonthOffset): ...
+class SemiMonthBegin(SemiMonthOffset): ...
+
+class Week(SingleConstructorOffset):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., weekday: int | None = ...
+ ) -> None: ...
+
+class WeekOfMonth(WeekOfMonthMixin):
+ def __init__(
+ self, n: int = ..., normalize: bool = ..., week: int = ..., weekday: int = ...
+ ) -> None: ...
+
+class LastWeekOfMonth(WeekOfMonthMixin): ...
+
+class FY5253Mixin(SingleConstructorOffset):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekday: int = ...,
+ startingMonth: int = ...,
+ variation: Literal["nearest", "last"] = ...,
+ ) -> None: ...
+
+class FY5253(FY5253Mixin): ...
+
+class FY5253Quarter(FY5253Mixin):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekday: int = ...,
+ startingMonth: int = ...,
+ qtr_with_extra_week: int = ...,
+ variation: Literal["nearest", "last"] = ...,
+ ) -> None: ...
+
+class Easter(SingleConstructorOffset): ...
+
+class _CustomBusinessMonth(BusinessMixin):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
+ offset: timedelta = ...,
+ ) -> None: ...
+
+class CustomBusinessDay(BusinessDay):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
+ offset: timedelta = ...,
+ ) -> None: ...
+
+class CustomBusinessHour(BusinessHour):
+ def __init__(
+ self,
+ n: int = ...,
+ normalize: bool = ...,
+ weekmask: str = ...,
+ holidays: list | None = ...,
+ calendar: np.busdaycalendar | None = ...,
+ start: str = ...,
+ end: str = ...,
+ offset: timedelta = ...,
+ ) -> None: ...
+
+class CustomBusinessMonthEnd(_CustomBusinessMonth): ...
+class CustomBusinessMonthBegin(_CustomBusinessMonth): ...
+class OffsetMeta(type): ...
+class DateOffset(RelativeDeltaOffset, metaclass=OffsetMeta): ...
+
+BDay = BusinessDay
+BMonthEnd = BusinessMonthEnd
+BMonthBegin = BusinessMonthBegin
+CBMonthEnd = CustomBusinessMonthEnd
+CBMonthBegin = CustomBusinessMonthBegin
+CDay = CustomBusinessDay
+
+def roll_qtrday(
+ other: datetime, n: int, month: int, day_opt: str, modby: int
+) -> int: ...
+
+INVALID_FREQ_ERR_MSG: Literal["Invalid frequency: {0}"]
+
+def shift_months(
+ dtindex: npt.NDArray[np.int64], months: int, day_opt: str | None = ...
+) -> npt.NDArray[np.int64]: ...
+
+_offset_map: dict[str, BaseOffset]
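The offsets above are reachable through the public pd.offsets / to_offset API; a short example:

import pandas as pd
from pandas.tseries.frequencies import to_offset

off = to_offset("2D")
print(type(off).__name__, off.n)         # Day 2
print(pd.Timestamp("2024-03-04") + off)  # 2024-03-06 00:00:00
print(pd.offsets.BMonthEnd().rollforward(pd.Timestamp("2024-03-04")))  # 2024-03-29 00:00:00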
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/parsing.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/parsing.pyi
new file mode 100644
index 00000000000..83a5b0085f0
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/parsing.pyi
@@ -0,0 +1,38 @@
+from datetime import datetime
+
+import numpy as np
+
+from pandas._typing import npt
+
+class DateParseError(ValueError): ...
+
+def py_parse_datetime_string(
+ date_string: str,
+ dayfirst: bool = ...,
+ yearfirst: bool = ...,
+) -> datetime: ...
+def parse_datetime_string_with_reso(
+ date_string: str,
+ freq: str | None = ...,
+ dayfirst: bool | None = ...,
+ yearfirst: bool | None = ...,
+) -> tuple[datetime, str]: ...
+def _does_string_look_like_datetime(py_string: str) -> bool: ...
+def quarter_to_myear(year: int, quarter: int, freq: str) -> tuple[int, int]: ...
+def try_parse_dates(
+ values: npt.NDArray[np.object_], # object[:]
+ parser,
+) -> npt.NDArray[np.object_]: ...
+def try_parse_year_month_day(
+ years: npt.NDArray[np.object_], # object[:]
+ months: npt.NDArray[np.object_], # object[:]
+ days: npt.NDArray[np.object_], # object[:]
+) -> npt.NDArray[np.object_]: ...
+def guess_datetime_format(
+ dt_str,
+ dayfirst: bool | None = ...,
+) -> str | None: ...
+def concat_date_cols(
+ date_cols: tuple,
+) -> npt.NDArray[np.object_]: ...
+def get_rule_month(source: str) -> str: ...
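guess_datetime_format is the helper pd.to_datetime uses to infer a strftime pattern from the first parseable string; a small sketch:

from pandas._libs.tslibs.parsing import guess_datetime_format

print(guess_datetime_format("2024-03-04 21:16"))           # %Y-%m-%d %H:%M
print(guess_datetime_format("04/03/2024", dayfirst=True))  # %d/%m/%Y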
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/period.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/period.pyi
new file mode 100644
index 00000000000..946ae1215f1
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/period.pyi
@@ -0,0 +1,127 @@
+from datetime import timedelta
+from typing import Literal
+
+import numpy as np
+
+from pandas._libs.tslibs.nattype import NaTType
+from pandas._libs.tslibs.offsets import BaseOffset
+from pandas._libs.tslibs.timestamps import Timestamp
+from pandas._typing import (
+ Frequency,
+ npt,
+)
+
+INVALID_FREQ_ERR_MSG: str
+DIFFERENT_FREQ: str
+
+class IncompatibleFrequency(ValueError): ...
+
+def periodarr_to_dt64arr(
+ periodarr: npt.NDArray[np.int64], # const int64_t[:]
+ freq: int,
+) -> npt.NDArray[np.int64]: ...
+def period_asfreq_arr(
+ arr: npt.NDArray[np.int64],
+ freq1: int,
+ freq2: int,
+ end: bool,
+) -> npt.NDArray[np.int64]: ...
+def get_period_field_arr(
+ field: str,
+ arr: npt.NDArray[np.int64], # const int64_t[:]
+ freq: int,
+) -> npt.NDArray[np.int64]: ...
+def from_ordinals(
+ values: npt.NDArray[np.int64], # const int64_t[:]
+ freq: timedelta | BaseOffset | str,
+) -> npt.NDArray[np.int64]: ...
+def extract_ordinals(
+ values: npt.NDArray[np.object_],
+ freq: Frequency | int,
+) -> npt.NDArray[np.int64]: ...
+def extract_freq(
+ values: npt.NDArray[np.object_],
+) -> BaseOffset: ...
+
+# exposed for tests
+def period_asfreq(ordinal: int, freq1: int, freq2: int, end: bool) -> int: ...
+def period_ordinal(
+ y: int, m: int, d: int, h: int, min: int, s: int, us: int, ps: int, freq: int
+) -> int: ...
+def freq_to_dtype_code(freq: BaseOffset) -> int: ...
+def validate_end_alias(how: str) -> Literal["E", "S"]: ...
+
+class PeriodMixin:
+ @property
+ def end_time(self) -> Timestamp: ...
+ @property
+ def start_time(self) -> Timestamp: ...
+ def _require_matching_freq(self, other, base: bool = ...) -> None: ...
+
+class Period(PeriodMixin):
+ ordinal: int # int64_t
+ freq: BaseOffset
+
+ # error: "__new__" must return a class instance (got "Union[Period, NaTType]")
+ def __new__( # type: ignore[misc]
+ cls,
+ value=...,
+ freq: int | str | BaseOffset | None = ...,
+ ordinal: int | None = ...,
+ year: int | None = ...,
+ month: int | None = ...,
+ quarter: int | None = ...,
+ day: int | None = ...,
+ hour: int | None = ...,
+ minute: int | None = ...,
+ second: int | None = ...,
+ ) -> Period | NaTType: ...
+ @classmethod
+ def _maybe_convert_freq(cls, freq) -> BaseOffset: ...
+ @classmethod
+ def _from_ordinal(cls, ordinal: int, freq) -> Period: ...
+ @classmethod
+ def now(cls, freq: BaseOffset = ...) -> Period: ...
+ def strftime(self, fmt: str) -> str: ...
+ def to_timestamp(
+ self,
+ freq: str | BaseOffset | None = ...,
+ how: str = ...,
+ ) -> Timestamp: ...
+ def asfreq(self, freq: str | BaseOffset, how: str = ...) -> Period: ...
+ @property
+ def freqstr(self) -> str: ...
+ @property
+ def is_leap_year(self) -> bool: ...
+ @property
+ def daysinmonth(self) -> int: ...
+ @property
+ def days_in_month(self) -> int: ...
+ @property
+ def qyear(self) -> int: ...
+ @property
+ def quarter(self) -> int: ...
+ @property
+ def day_of_year(self) -> int: ...
+ @property
+ def weekday(self) -> int: ...
+ @property
+ def day_of_week(self) -> int: ...
+ @property
+ def week(self) -> int: ...
+ @property
+ def weekofyear(self) -> int: ...
+ @property
+ def second(self) -> int: ...
+ @property
+ def minute(self) -> int: ...
+ @property
+ def hour(self) -> int: ...
+ @property
+ def day(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def year(self) -> int: ...
+ def __sub__(self, other) -> Period | BaseOffset: ...
+ def __add__(self, other) -> Period: ...
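Period is public API; a quick example of the surface stubbed above:

import pandas as pd

p = pd.Period("2024-03", freq="M")
print(p.start_time, "->", p.end_time)  # 2024-03-01 00:00:00 -> 2024-03-31 23:59:59.999999999
print(p.asfreq("Q"))                   # 2024Q1
print(p + 2)                           # 2024-05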
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/strptime.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/strptime.pyi
new file mode 100644
index 00000000000..4565bb7ecf9
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/strptime.pyi
@@ -0,0 +1,13 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def array_strptime(
+ values: npt.NDArray[np.object_],
+ fmt: str | None,
+ exact: bool = ...,
+ errors: str = ...,
+ utc: bool = ...,
+) -> tuple[np.ndarray, np.ndarray]: ...
+
+# first ndarray is M8[ns], second is object ndarray of tzinfo | None
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/timedeltas.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/timedeltas.pyi
new file mode 100644
index 00000000000..d67a330e0b0
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/timedeltas.pyi
@@ -0,0 +1,163 @@
+from datetime import timedelta
+from typing import (
+ ClassVar,
+ Literal,
+ TypeVar,
+ overload,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs import (
+ NaTType,
+ Tick,
+)
+from pandas._typing import npt
+
+# This should be kept consistent with the keys in the dict timedelta_abbrevs
+# in pandas/_libs/tslibs/timedeltas.pyx
+UnitChoices = Literal[
+ "Y",
+ "y",
+ "M",
+ "W",
+ "w",
+ "D",
+ "d",
+ "days",
+ "day",
+ "hours",
+ "hour",
+ "hr",
+ "h",
+ "m",
+ "minute",
+ "min",
+ "minutes",
+ "t",
+ "s",
+ "seconds",
+ "sec",
+ "second",
+ "ms",
+ "milliseconds",
+ "millisecond",
+ "milli",
+ "millis",
+ "l",
+ "us",
+ "microseconds",
+ "microsecond",
+ "µs",
+ "micro",
+ "micros",
+ "u",
+ "ns",
+ "nanoseconds",
+ "nano",
+ "nanos",
+ "nanosecond",
+ "n",
+]
+_S = TypeVar("_S", bound=timedelta)
+
+def ints_to_pytimedelta(
+ arr: npt.NDArray[np.timedelta64],
+ box: bool = ...,
+) -> npt.NDArray[np.object_]: ...
+def array_to_timedelta64(
+ values: npt.NDArray[np.object_],
+ unit: str | None = ...,
+ errors: str = ...,
+) -> np.ndarray: ... # np.ndarray[m8ns]
+def parse_timedelta_unit(unit: str | None) -> UnitChoices: ...
+def delta_to_nanoseconds(
+ delta: np.timedelta64 | timedelta | Tick,
+ reso: int = ..., # NPY_DATETIMEUNIT
+ round_ok: bool = ...,
+) -> int: ...
+def floordiv_object_array(
+ left: np.ndarray, right: npt.NDArray[np.object_]
+) -> np.ndarray: ...
+def truediv_object_array(
+ left: np.ndarray, right: npt.NDArray[np.object_]
+) -> np.ndarray: ...
+
+class Timedelta(timedelta):
+ _creso: int
+ min: ClassVar[Timedelta]
+ max: ClassVar[Timedelta]
+ resolution: ClassVar[Timedelta]
+ value: int # np.int64
+ _value: int # np.int64
+ # error: "__new__" must return a class instance (got "Union[Timedelta, NaTType]")
+ def __new__( # type: ignore[misc]
+ cls: type[_S],
+ value=...,
+ unit: str | None = ...,
+ **kwargs: float | np.integer | np.floating,
+ ) -> _S | NaTType: ...
+ @classmethod
+ def _from_value_and_reso(cls, value: np.int64, reso: int) -> Timedelta: ...
+ @property
+ def days(self) -> int: ...
+ @property
+ def seconds(self) -> int: ...
+ @property
+ def microseconds(self) -> int: ...
+ def total_seconds(self) -> float: ...
+ def to_pytimedelta(self) -> timedelta: ...
+ def to_timedelta64(self) -> np.timedelta64: ...
+ @property
+ def asm8(self) -> np.timedelta64: ...
+ # TODO: round/floor/ceil could return NaT?
+ def round(self: _S, freq: str) -> _S: ...
+ def floor(self: _S, freq: str) -> _S: ...
+ def ceil(self: _S, freq: str) -> _S: ...
+ @property
+ def resolution_string(self) -> str: ...
+ def __add__(self, other: timedelta) -> Timedelta: ...
+ def __radd__(self, other: timedelta) -> Timedelta: ...
+ def __sub__(self, other: timedelta) -> Timedelta: ...
+ def __rsub__(self, other: timedelta) -> Timedelta: ...
+ def __neg__(self) -> Timedelta: ...
+ def __pos__(self) -> Timedelta: ...
+ def __abs__(self) -> Timedelta: ...
+ def __mul__(self, other: float) -> Timedelta: ...
+ def __rmul__(self, other: float) -> Timedelta: ...
+ # error: Signature of "__floordiv__" incompatible with supertype "timedelta"
+ @overload # type: ignore[override]
+ def __floordiv__(self, other: timedelta) -> int: ...
+ @overload
+ def __floordiv__(self, other: float) -> Timedelta: ...
+ @overload
+ def __floordiv__(
+ self, other: npt.NDArray[np.timedelta64]
+ ) -> npt.NDArray[np.intp]: ...
+ @overload
+ def __floordiv__(
+ self, other: npt.NDArray[np.number]
+ ) -> npt.NDArray[np.timedelta64] | Timedelta: ...
+ @overload
+ def __rfloordiv__(self, other: timedelta | str) -> int: ...
+ @overload
+ def __rfloordiv__(self, other: None | NaTType) -> NaTType: ...
+ @overload
+ def __rfloordiv__(self, other: np.ndarray) -> npt.NDArray[np.timedelta64]: ...
+ @overload
+ def __truediv__(self, other: timedelta) -> float: ...
+ @overload
+ def __truediv__(self, other: float) -> Timedelta: ...
+ def __mod__(self, other: timedelta) -> Timedelta: ...
+ def __divmod__(self, other: timedelta) -> tuple[int, Timedelta]: ...
+ def __le__(self, other: timedelta) -> bool: ...
+ def __lt__(self, other: timedelta) -> bool: ...
+ def __ge__(self, other: timedelta) -> bool: ...
+ def __gt__(self, other: timedelta) -> bool: ...
+ def __hash__(self) -> int: ...
+ def isoformat(self) -> str: ...
+ def to_numpy(self) -> np.timedelta64: ...
+ def view(self, dtype: npt.DTypeLike = ...) -> object: ...
+ @property
+ def unit(self) -> str: ...
+ def as_unit(self, unit: str, round_ok: bool = ...) -> Timedelta: ...
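Timedelta is public API; a brief example of the arithmetic and unit handling stubbed above:

import pandas as pd

td = pd.Timedelta("1 days 02:30:00")
print(td.total_seconds())           # 95400.0
print(td // pd.Timedelta("1h"))     # 26
print(td.floor("h"), td.ceil("h"))  # 1 days 02:00:00  1 days 03:00:00
print(td.as_unit("s").unit)         # s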
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/timestamps.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/timestamps.pyi
new file mode 100644
index 00000000000..26b0c9170aa
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/timestamps.pyi
@@ -0,0 +1,228 @@
+from datetime import (
+ date as _date,
+ datetime,
+ time as _time,
+ timedelta,
+ tzinfo as _tzinfo,
+)
+from time import struct_time
+from typing import (
+ ClassVar,
+ TypeVar,
+ overload,
+)
+
+import numpy as np
+
+from pandas._libs.tslibs import (
+ BaseOffset,
+ NaTType,
+ Period,
+ Tick,
+ Timedelta,
+)
+
+_DatetimeT = TypeVar("_DatetimeT", bound=datetime)
+
+def integer_op_not_supported(obj: object) -> TypeError: ...
+
+class Timestamp(datetime):
+ _creso: int
+ min: ClassVar[Timestamp]
+ max: ClassVar[Timestamp]
+
+ resolution: ClassVar[Timedelta]
+ _value: int # np.int64
+ # error: "__new__" must return a class instance (got "Union[Timestamp, NaTType]")
+ def __new__( # type: ignore[misc]
+ cls: type[_DatetimeT],
+ ts_input: np.integer | float | str | _date | datetime | np.datetime64 = ...,
+ year: int | None = ...,
+ month: int | None = ...,
+ day: int | None = ...,
+ hour: int | None = ...,
+ minute: int | None = ...,
+ second: int | None = ...,
+ microsecond: int | None = ...,
+ tzinfo: _tzinfo | None = ...,
+ *,
+ nanosecond: int | None = ...,
+ tz: str | _tzinfo | None | int = ...,
+ unit: str | int | None = ...,
+ fold: int | None = ...,
+ ) -> _DatetimeT | NaTType: ...
+ @classmethod
+ def _from_value_and_reso(
+ cls, value: int, reso: int, tz: _tzinfo | None
+ ) -> Timestamp: ...
+ @property
+ def value(self) -> int: ... # np.int64
+ @property
+ def year(self) -> int: ...
+ @property
+ def month(self) -> int: ...
+ @property
+ def day(self) -> int: ...
+ @property
+ def hour(self) -> int: ...
+ @property
+ def minute(self) -> int: ...
+ @property
+ def second(self) -> int: ...
+ @property
+ def microsecond(self) -> int: ...
+ @property
+ def nanosecond(self) -> int: ...
+ @property
+ def tzinfo(self) -> _tzinfo | None: ...
+ @property
+ def tz(self) -> _tzinfo | None: ...
+ @property
+ def fold(self) -> int: ...
+ @classmethod
+ def fromtimestamp(
+ cls: type[_DatetimeT], ts: float, tz: _tzinfo | None = ...
+ ) -> _DatetimeT: ...
+ @classmethod
+ def utcfromtimestamp(cls: type[_DatetimeT], ts: float) -> _DatetimeT: ...
+ @classmethod
+ def today(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ...
+ @classmethod
+ def fromordinal(
+ cls: type[_DatetimeT],
+ ordinal: int,
+ tz: _tzinfo | str | None = ...,
+ ) -> _DatetimeT: ...
+ @classmethod
+ def now(cls: type[_DatetimeT], tz: _tzinfo | str | None = ...) -> _DatetimeT: ...
+ @classmethod
+ def utcnow(cls: type[_DatetimeT]) -> _DatetimeT: ...
+ # error: Signature of "combine" incompatible with supertype "datetime"
+ @classmethod
+ def combine( # type: ignore[override]
+ cls, date: _date, time: _time
+ ) -> datetime: ...
+ @classmethod
+ def fromisoformat(cls: type[_DatetimeT], date_string: str) -> _DatetimeT: ...
+ def strftime(self, format: str) -> str: ...
+ def __format__(self, fmt: str) -> str: ...
+ def toordinal(self) -> int: ...
+ def timetuple(self) -> struct_time: ...
+ def timestamp(self) -> float: ...
+ def utctimetuple(self) -> struct_time: ...
+ def date(self) -> _date: ...
+ def time(self) -> _time: ...
+ def timetz(self) -> _time: ...
+ # LSP violation: nanosecond is not present in datetime.datetime.replace
+ # and has positional args following it
+ def replace( # type: ignore[override]
+ self: _DatetimeT,
+ year: int | None = ...,
+ month: int | None = ...,
+ day: int | None = ...,
+ hour: int | None = ...,
+ minute: int | None = ...,
+ second: int | None = ...,
+ microsecond: int | None = ...,
+ nanosecond: int | None = ...,
+ tzinfo: _tzinfo | type[object] | None = ...,
+ fold: int | None = ...,
+ ) -> _DatetimeT: ...
+ # LSP violation: datetime.datetime.astimezone has a default value for tz
+ def astimezone( # type: ignore[override]
+ self: _DatetimeT, tz: _tzinfo | None
+ ) -> _DatetimeT: ...
+ def ctime(self) -> str: ...
+ def isoformat(self, sep: str = ..., timespec: str = ...) -> str: ...
+ @classmethod
+ def strptime(cls, date_string: str, format: str) -> datetime: ...
+ def utcoffset(self) -> timedelta | None: ...
+ def tzname(self) -> str | None: ...
+ def dst(self) -> timedelta | None: ...
+ def __le__(self, other: datetime) -> bool: ... # type: ignore[override]
+ def __lt__(self, other: datetime) -> bool: ... # type: ignore[override]
+ def __ge__(self, other: datetime) -> bool: ... # type: ignore[override]
+ def __gt__(self, other: datetime) -> bool: ... # type: ignore[override]
+ # error: Signature of "__add__" incompatible with supertype "date"/"datetime"
+ @overload # type: ignore[override]
+ def __add__(self, other: np.ndarray) -> np.ndarray: ...
+ @overload
+ def __add__(
+ self: _DatetimeT, other: timedelta | np.timedelta64 | Tick
+ ) -> _DatetimeT: ...
+ def __radd__(self: _DatetimeT, other: timedelta) -> _DatetimeT: ...
+ @overload # type: ignore[override]
+ def __sub__(self, other: datetime) -> Timedelta: ...
+ @overload
+ def __sub__(
+ self: _DatetimeT, other: timedelta | np.timedelta64 | Tick
+ ) -> _DatetimeT: ...
+ def __hash__(self) -> int: ...
+ def weekday(self) -> int: ...
+ def isoweekday(self) -> int: ...
+ def isocalendar(self) -> tuple[int, int, int]: ...
+ @property
+ def is_leap_year(self) -> bool: ...
+ @property
+ def is_month_start(self) -> bool: ...
+ @property
+ def is_quarter_start(self) -> bool: ...
+ @property
+ def is_year_start(self) -> bool: ...
+ @property
+ def is_month_end(self) -> bool: ...
+ @property
+ def is_quarter_end(self) -> bool: ...
+ @property
+ def is_year_end(self) -> bool: ...
+ def to_pydatetime(self, warn: bool = ...) -> datetime: ...
+ def to_datetime64(self) -> np.datetime64: ...
+ def to_period(self, freq: BaseOffset | str = ...) -> Period: ...
+ def to_julian_date(self) -> np.float64: ...
+ @property
+ def asm8(self) -> np.datetime64: ...
+ def tz_convert(self: _DatetimeT, tz: _tzinfo | str | None) -> _DatetimeT: ...
+ # TODO: could return NaT?
+ def tz_localize(
+ self: _DatetimeT,
+ tz: _tzinfo | str | None,
+ ambiguous: str = ...,
+ nonexistent: str = ...,
+ ) -> _DatetimeT: ...
+ def normalize(self: _DatetimeT) -> _DatetimeT: ...
+ # TODO: round/floor/ceil could return NaT?
+ def round(
+ self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ...
+ ) -> _DatetimeT: ...
+ def floor(
+ self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ...
+ ) -> _DatetimeT: ...
+ def ceil(
+ self: _DatetimeT, freq: str, ambiguous: bool | str = ..., nonexistent: str = ...
+ ) -> _DatetimeT: ...
+ def day_name(self, locale: str | None = ...) -> str: ...
+ def month_name(self, locale: str | None = ...) -> str: ...
+ @property
+ def day_of_week(self) -> int: ...
+ @property
+ def dayofweek(self) -> int: ...
+ @property
+ def day_of_year(self) -> int: ...
+ @property
+ def dayofyear(self) -> int: ...
+ @property
+ def quarter(self) -> int: ...
+ @property
+ def week(self) -> int: ...
+ def to_numpy(
+ self, dtype: np.dtype | None = ..., copy: bool = ...
+ ) -> np.datetime64: ...
+ @property
+ def _date_repr(self) -> str: ...
+ @property
+ def days_in_month(self) -> int: ...
+ @property
+ def daysinmonth(self) -> int: ...
+ @property
+ def unit(self) -> str: ...
+ def as_unit(self, unit: str, round_ok: bool = ...) -> Timestamp: ...
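Timestamp is public API; a brief example exercising the nanosecond, timezone, and unit pieces stubbed above:

import pandas as pd

ts = pd.Timestamp("2024-03-04 21:16:16.123456789", tz="UTC")
print(ts.nanosecond)                   # 789
print(ts.tz_convert("Europe/Moscow"))  # 2024-03-05 00:16:16.123456789+03:00
print(ts.floor("s"))                   # 2024-03-04 21:16:16+00:00
print(ts.as_unit("us").unit)           # us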
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/timezones.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/timezones.pyi
new file mode 100644
index 00000000000..4e9f0c6ae6c
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/timezones.pyi
@@ -0,0 +1,21 @@
+from datetime import (
+ datetime,
+ tzinfo,
+)
+from typing import Callable
+
+import numpy as np
+
+# imported from dateutil.tz
+dateutil_gettz: Callable[[str], tzinfo]
+
+def tz_standardize(tz: tzinfo) -> tzinfo: ...
+def tz_compare(start: tzinfo | None, end: tzinfo | None) -> bool: ...
+def infer_tzinfo(
+ start: datetime | None,
+ end: datetime | None,
+) -> tzinfo | None: ...
+def maybe_get_tz(tz: str | int | np.int64 | tzinfo | None) -> tzinfo | None: ...
+def get_timezone(tz: tzinfo) -> tzinfo | str: ...
+def is_utc(tz: tzinfo | None) -> bool: ...
+def is_fixed_offset(tz: tzinfo) -> bool: ...
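A short sketch of the timezone helpers above (private API; shown only to illustrate the stubs):

from datetime import datetime, timezone
from pandas._libs.tslibs import timezones

print(timezones.is_utc(timezone.utc))            # True
print(timezones.tz_compare(timezone.utc, None))  # False
aware = datetime(2024, 3, 4, tzinfo=timezone.utc)
print(timezones.infer_tzinfo(aware, None))       # UTC -- taken from the non-None endpoint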
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/tzconversion.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/tzconversion.pyi
new file mode 100644
index 00000000000..a354765a348
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/tzconversion.pyi
@@ -0,0 +1,21 @@
+from datetime import (
+ timedelta,
+ tzinfo,
+)
+from typing import Iterable
+
+import numpy as np
+
+from pandas._typing import npt
+
+# tz_convert_from_utc_single exposed for testing
+def tz_convert_from_utc_single(
+ val: np.int64, tz: tzinfo, creso: int = ...
+) -> np.int64: ...
+def tz_localize_to_utc(
+ vals: npt.NDArray[np.int64],
+ tz: tzinfo | None,
+ ambiguous: str | bool | Iterable[bool] | None = ...,
+ nonexistent: str | timedelta | np.timedelta64 | None = ...,
+ creso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/tslibs/vectorized.pyi b/contrib/python/pandas/py3/pandas/_libs/tslibs/vectorized.pyi
new file mode 100644
index 00000000000..3fd9e2501e6
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/tslibs/vectorized.pyi
@@ -0,0 +1,43 @@
+"""
+For cython types that cannot be represented precisely, closest-available
+python equivalents are used, and the precise types kept as adjacent comments.
+"""
+from datetime import tzinfo
+
+import numpy as np
+
+from pandas._libs.tslibs.dtypes import Resolution
+from pandas._typing import npt
+
+def dt64arr_to_periodarr(
+ stamps: npt.NDArray[np.int64],
+ freq: int,
+ tz: tzinfo | None,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def is_date_array_normalized(
+ stamps: npt.NDArray[np.int64],
+ tz: tzinfo | None,
+ reso: int, # NPY_DATETIMEUNIT
+) -> bool: ...
+def normalize_i8_timestamps(
+ stamps: npt.NDArray[np.int64],
+ tz: tzinfo | None,
+ reso: int, # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
+def get_resolution(
+ stamps: npt.NDArray[np.int64],
+ tz: tzinfo | None = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> Resolution: ...
+def ints_to_pydatetime(
+ arr: npt.NDArray[np.int64],
+ tz: tzinfo | None = ...,
+ box: str = ...,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.object_]: ...
+def tz_convert_from_utc(
+ stamps: npt.NDArray[np.int64],
+ tz: tzinfo | None,
+ reso: int = ..., # NPY_DATETIMEUNIT
+) -> npt.NDArray[np.int64]: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/window/aggregations.pyi b/contrib/python/pandas/py3/pandas/_libs/window/aggregations.pyi
new file mode 100644
index 00000000000..b926a7cb734
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/window/aggregations.pyi
@@ -0,0 +1,127 @@
+from typing import (
+ Any,
+ Callable,
+ Literal,
+)
+
+import numpy as np
+
+from pandas._typing import (
+ WindowingRankType,
+ npt,
+)
+
+def roll_sum(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_mean(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_var(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+ ddof: int = ...,
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_skew(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_kurt(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_median_c(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_max(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_min(
+ values: np.ndarray, # np.ndarray[np.float64]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_quantile(
+ values: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+ quantile: float, # float64_t
+ interpolation: Literal["linear", "lower", "higher", "nearest", "midpoint"],
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_rank(
+ values: np.ndarray,
+ start: np.ndarray,
+ end: np.ndarray,
+ minp: int,
+ percentile: bool,
+ method: WindowingRankType,
+ ascending: bool,
+) -> np.ndarray: ... # np.ndarray[float]
+def roll_apply(
+ obj: object,
+ start: np.ndarray, # np.ndarray[np.int64]
+ end: np.ndarray, # np.ndarray[np.int64]
+ minp: int, # int64_t
+ function: Callable[..., Any],
+ raw: bool,
+ args: tuple[Any, ...],
+ kwargs: dict[str, Any],
+) -> npt.NDArray[np.float64]: ...
+def roll_weighted_sum(
+ values: np.ndarray, # const float64_t[:]
+ weights: np.ndarray, # const float64_t[:]
+ minp: int,
+) -> np.ndarray: ... # np.ndarray[np.float64]
+def roll_weighted_mean(
+ values: np.ndarray, # const float64_t[:]
+ weights: np.ndarray, # const float64_t[:]
+ minp: int,
+) -> np.ndarray: ... # np.ndarray[np.float64]
+def roll_weighted_var(
+ values: np.ndarray, # const float64_t[:]
+ weights: np.ndarray, # const float64_t[:]
+ minp: int, # int64_t
+ ddof: int, # unsigned int
+) -> np.ndarray: ... # np.ndarray[np.float64]
+def ewm(
+ vals: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # const int64_t[:]
+ end: np.ndarray, # const int64_t[:]
+ minp: int,
+ com: float, # float64_t
+ adjust: bool,
+ ignore_na: bool,
+ deltas: np.ndarray, # const float64_t[:]
+ normalize: bool,
+) -> np.ndarray: ... # np.ndarray[np.float64]
+def ewmcov(
+ input_x: np.ndarray, # const float64_t[:]
+ start: np.ndarray, # const int64_t[:]
+ end: np.ndarray, # const int64_t[:]
+ minp: int,
+ input_y: np.ndarray, # const float64_t[:]
+ com: float, # float64_t
+ adjust: bool,
+ ignore_na: bool,
+ bias: bool,
+) -> np.ndarray: ... # np.ndarray[np.float64]
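These rolling kernels take explicit per-output window bounds, normally produced by a window indexer; the start/end arrays below are hard-coded for illustration, so treat the sketch as approximate:

import numpy as np
from pandas._libs.window.aggregations import roll_sum

values = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float64)
start = np.array([0, 0, 1, 2], dtype=np.int64)  # window i covers values[start[i]:end[i]]
end = np.array([1, 2, 3, 4], dtype=np.int64)
print(roll_sum(values, start, end, 2))          # [nan  3.  5.  7.] -- first window < minp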
diff --git a/contrib/python/pandas/py3/pandas/_libs/window/indexers.pyi b/contrib/python/pandas/py3/pandas/_libs/window/indexers.pyi
new file mode 100644
index 00000000000..c9bc64be34a
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/window/indexers.pyi
@@ -0,0 +1,12 @@
+import numpy as np
+
+from pandas._typing import npt
+
+def calculate_variable_window_bounds(
+ num_values: int, # int64_t
+ window_size: int, # int64_t
+ min_periods,
+ center: bool,
+ closed: str | None,
+ index: np.ndarray, # const int64_t[:]
+) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
diff --git a/contrib/python/pandas/py3/pandas/_libs/writers.pyi b/contrib/python/pandas/py3/pandas/_libs/writers.pyi
new file mode 100644
index 00000000000..7b41856525d
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/_libs/writers.pyi
@@ -0,0 +1,20 @@
+import numpy as np
+
+from pandas._typing import ArrayLike
+
+def write_csv_rows(
+ data: list[ArrayLike],
+ data_index: np.ndarray,
+ nlevels: int,
+ cols: np.ndarray,
+ writer: object, # _csv.writer
+) -> None: ...
+def convert_json_to_lines(arr: str) -> str: ...
+def max_len_string_array(
+ arr: np.ndarray, # pandas_string[:]
+) -> int: ...
+def word_len(val: object) -> int: ...
+def string_array_replace_from_nan_rep(
+ arr: np.ndarray, # np.ndarray[object, ndim=1]
+ nan_rep: object,
+) -> None: ...
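A quick sketch of two of the writer helpers above (private API):

import numpy as np
from pandas._libs.writers import max_len_string_array, word_len

arr = np.array(["a", "bbb", "cc"], dtype=object)
print(max_len_string_array(arr))   # 3
print(word_len("pandas"))          # 6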
diff --git a/contrib/python/pandas/py3/pandas/io/sas/_byteswap.pyi b/contrib/python/pandas/py3/pandas/io/sas/_byteswap.pyi
new file mode 100644
index 00000000000..bb0dbfc6a50
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/io/sas/_byteswap.pyi
@@ -0,0 +1,5 @@
+def read_float_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
+def read_double_with_byteswap(data: bytes, offset: int, byteswap: bool) -> float: ...
+def read_uint16_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
+def read_uint32_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
+def read_uint64_with_byteswap(data: bytes, offset: int, byteswap: bool) -> int: ...
diff --git a/contrib/python/pandas/py3/pandas/io/sas/_sas.pyi b/contrib/python/pandas/py3/pandas/io/sas/_sas.pyi
new file mode 100644
index 00000000000..5d65e2b56b5
--- /dev/null
+++ b/contrib/python/pandas/py3/pandas/io/sas/_sas.pyi
@@ -0,0 +1,7 @@
+from pandas.io.sas.sas7bdat import SAS7BDATReader
+
+class Parser:
+ def __init__(self, parser: SAS7BDATReader) -> None: ...
+ def read(self, nrows: int) -> None: ...
+
+def get_subheader_index(signature: bytes) -> int: ...