
    g#                   &   d dl mZ d dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d dlm	Z	 d dlm
Z
 d d	lmZ d d
lmZ d dlmZ d dlZd dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d dlmZ d dlm Z! d dl"m#Z# d dl"m$Z$ d dl"m%Z% d dl"m&Z& d dl"m'Z' d dl"m(Z( d dl"m)Z) d dl"m*Z+ d dl"m,Z, d dl"m-Z- d d l.m/Z0 d d!l1m2Z3 d d"l4m5Z5 d d#l6m7Z7 d d$l6m8Z8 d d%l6m9Z9 d d&l6m:Z: d d'l6m;Z; d d(l6m<Z< d d)l6m=Z= d d*l6m>Z> d d+l6m?Z? d d,l6m@Z@ d d-l6mAZA d d.l6mBZB d d/l6mCZC d d0l6mDZD d d1l6mEZE d d2l6mFZF d d3l6mGZG d d4l6mHZH d d5l6mIZI d d6l6mJZJ d d7l6mKZK d d8l6mLZL d d9l6mMZM d d:l6mNZN d d;l6mOZO d d<l6mPZP d d=lQmRZR d d>lQmSZS d d?lQmTZT d d@lUmVZV d dAlUmWZW d dBlUmXZX d dClYmZZZ d dDlYm[Z[ d dElYm\Z\ d dFlYm]Z] d dGlYm^Z^ d dHlYm_Z_ d dIlYm`Z` d dJlYmaZa d dKlYmbZb d dLlYmcZc er(d dMldmeZe d dlfZgd dNlhmiZi d dOljmkZk d dPl"mlZl d dQlUmmZm d dRlUmnZn  edS      Zo G dT dUeeV         Z G dV dWeeW         Z G dX dYe3e         Z2 G dZ d[e      Z G d\ d]e0      Z/edd^       Zpedd_       Zpedd`       Zpedda       Zpeddb       Zp	 	 	 	 ddcZpeddddde	 	 	 	 	 	 	 	 	 	 	 	 	 ddf       Zqedddddg	 	 	 	 	 	 	 	 	 	 	 	 	 ddh       Zqedddddddi	 	 	 	 	 	 	 	 	 	 	 	 	 ddj       Zqedddddddi	 	 	 	 	 	 	 	 	 	 	 	 	 ddk       Zqedddddddl	 	 	 	 	 	 	 	 	 	 	 	 	 ddm       Zqedddddddl	 	 	 	 	 	 	 	 	 	 	 	 	 ddn       Zqedddddddo	 	 	 	 	 	 	 	 	 	 	 	 	 ddp       Zqedddddddq	 	 	 	 	 	 	 	 	 	 	 	 	 ddr       Zqeddddddddds	 	 	 	 	 	 	 	 	 	 	 	 	 ddt       Zqeddddddddds	 	 	 	 	 	 	 	 	 	 	 	 	 ddu       Zqedddddddddv	 	 	 	 	 	 	 	 	 	 	 	 	 ddw       Zqedddddddddx	 	 	 	 	 	 	 	 	 	 	 	 	 ddy       Zqedddddddddz	 	 	 	 	 	 	 	 	 	 	 	 	 dd{       Zqeddddddddd|	 	 	 	 	 	 	 	 	 	 	 	 	 dd}       Zqeddddddddddd~	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddde	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddg	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddddi	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddddi	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddddl	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddddl	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddddo	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedddddddq	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddds	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddds	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqeddddddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqedd	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Zqddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddZqeddd	 	 	 	 	 dd       Zreddd	 	 	 	 	 dd       Zreddddd       Zredd       Zreddd	 	 	 	 	 dd       Zreddd	 	 	 	 	 dd       Zreddddd       Zredd       Zrddd	 	 	 	 	 	 	 ddZr	 dddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddZsddZtd dZuddZvddZwdddZxddZyddZzddZ{ddZ|ddZ}ddZ~ddZddZddZddZddZedd	 	 	 	 	 dd       Zedd	 	 	 	 	 dd       Zdd	 	 	 	 	 ddZddd	 	 	 	 	 	 	 	 	 ddZ G d de      Z G d dee      Zd	dÄZ 	 d	 	 	 	 	 	 	 	 	 d
dĄZ	 	 	 	 	 	 ddńZ*	 dddƜ	 	 	 	 	 	 	 ddǄZ	 d	 	 	 	 	 	 	 ddȄZ	 	 	 	 	 	 	 	 ddɄZ	 	 	 	 	 	 	 	 ddʄZ	 	 	 	 	 	 	 	 dd˄Z	 	 	 	 	 	 	 	 dd̄Zg d͢Zy(      )annotationswraps)TYPE_CHECKING)Any)Callable)Iterable)Literal)Sequence)TypeVar)overload)warnN)dependencies)
exceptions)	selectors	DataFrame	LazyFrame)Expr)Then)When)when)_from_dict_impl)_from_numpy_impl)_new_series_impl)_read_csv_impl)_read_parquet_impl)_scan_csv_impl)_scan_parquet_impl)
from_arrow)	get_level)show_versions)SchemaSeries)dtypes)Array)Boolean)Categorical)Date)Datetime)Decimal)Duration)Enum)Field)Float32)Float64)Int8)Int16)Int32)Int64)Int128)List)Object)String)Struct)UInt8)UInt16)UInt32)UInt64)UInt128)Unknown)_from_native_impl)get_native_namespace)to_py_scalar)IntoDataFrameT)
IntoFrameT)IntoSeriesT)Implementation)Version)generate_temporary_column_name)is_ordered_categorical)maybe_align_index)maybe_convert_dtypes)maybe_get_index)maybe_reset_index)maybe_set_indexvalidate_strict_and_pass_though)
ModuleType)Self)DType)ArrowStreamExportable)IntoExpr)
IntoSeriesTc                      e Zd ZdZedd       Zedd       Zed d       Zed!d       Zed"d       Zed#d       Zed$d       Zed%d	       Zed&d
       Zed'd       Zed(d       Zed)d       Zed*d       Zed+d       Zed,d       Zed-d       Zd. fdZd/ fdZ	eddd0d       Z
ed1d       Z
ed2d       Z
dd	 	 	 d2 fdZ
d3 fdZd3 fdZd4dZ xZS )5r   a  Narwhals DataFrame, backed by a native eager dataframe.

    !!! warning
        This class is not meant to be instantiated directly - instead:

        - If the native object is an eager dataframe from one of the supported
            backends (e.g. pandas.DataFrame, polars.DataFrame, pyarrow.Table),
            you can use [`narwhals.from_native`][]:
            ```py
            narwhals.from_native(native_dataframe)
            narwhals.from_native(native_dataframe, eager_only=True)
            ```

        - If the object is a mapping of column names to generic sequences
            (e.g. `dict[str, list]`), you can create a DataFrame via
            [`narwhals.from_dict`][]:
            ```py
            narwhals.from_dict(
                data={"a": [1, 2, 3]},
                native_namespace=narwhals.get_native_namespace(another_object),
            )
            ```
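
        In either case you get a `narwhals.DataFrame` wrapping the native object;
        a minimal round-trip sketch (assuming `native_dataframe` is any supported
        eager frame with a column `"a"`) could look like:
        ```py
        df = narwhals.from_native(native_dataframe, eager_only=True)
        df = df.with_columns((narwhals.col("a") * 2).alias("a_doubled"))
        result = df.to_native()  # back to the original native type
        ```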
    c                    t         S Nr%   selfs    P/var/www/openai/venv/lib/python3.12/site-packages/narwhals/stable/v1/__init__.py_serieszDataFrame._seriesz   s        c                    t         S r\   r   r]   s    r_   
_lazyframezDataFrame._lazyframe~       ra   c                     y r\    r^   items     r_   __getitem__zDataFrame.__getitem__       FIra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__       NQra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rj   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rj   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__       >Ara   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rl   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rj   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rj   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rp   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__       8;ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__       03ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rv   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rx   ra   c                     y r\   rf   rg   s     r_   ri   zDataFrame.__getitem__   rp   ra   c                "    t         |   |      S r\   )superri   )r^   rh   	__class__s     r_   ri   zDataFrame.__getitem__   s    w"4((ra   c                     t         |          S )a  Lazify the DataFrame (if possible).

        If a library does not support lazy execution, then this is a no-op.

        Returns:
            A new LazyFrame.

        Examples:
            Construct pandas, Polars and PyArrow DataFrames:

            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoFrame
            >>> data = {"foo": [1, 2, 3], "bar": [6.0, 7.0, 8.0], "ham": ["a", "b", "c"]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            We define a library agnostic function:

            >>> def agnostic_lazy(df_native: IntoFrame) -> IntoFrame:
            ...     df = nw.from_native(df_native)
            ...     return df.lazy().to_native()

            Note that pandas and PyArrow dataframes stay eager, but the Polars DataFrame
            becomes a Polars LazyFrame:

            >>> agnostic_lazy(df_pd)
               foo  bar ham
            0    1  6.0   a
            1    2  7.0   b
            2    3  8.0   c
            >>> agnostic_lazy(df_pl)
            <LazyFrame ...>
            >>> agnostic_lazy(df_pa)
            pyarrow.Table
            foo: int64
            bar: double
            ham: string
            ----
            foo: [[1,2,3]]
            bar: [[6,7,8]]
            ham: [["a","b","c"]]
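
            A lazy frame produced this way can be materialized again with
            `LazyFrame.collect`; for example, a sketch (output not shown):

            >>> nw.from_native(df_pl).lazy().collect().to_native()  # doctest: +SKIP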
        )r}   lazyr^   r~   s    r_   r   zDataFrame.lazy   s    ^ w|~ra   .	as_seriesc                    y r\   rf   r^   r   s     r_   to_dictzDataFrame.to_dict       ORra   c                    y r\   rf   r   s     r_   r   zDataFrame.to_dict   s    MPra   c                    y r\   rf   r   s     r_   r   zDataFrame.to_dict   s    WZra   Tc               $    t         |   |      S )aq  Convert DataFrame to a dictionary mapping column name to values.

        Arguments:
            as_series: If set to ``True``, then the values are Narwhals Series,
                    otherwise the values are Any.

        Returns:
            A mapping from column name to values / Series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoDataFrame
            >>> data = {
            ...     "A": [1, 2, 3, 4, 5],
            ...     "fruits": ["banana", "banana", "apple", "apple", "banana"],
            ...     "B": [5, 4, 3, 2, 1],
            ...     "animals": ["beetle", "fly", "beetle", "beetle", "beetle"],
            ...     "optional": [28, 300, None, 2, -30],
            ... }
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            We define a library agnostic function:

            >>> def agnostic_to_dict(
            ...     df_native: IntoDataFrame,
            ... ) -> dict[str, list[int | str | float | None]]:
            ...     df = nw.from_native(df_native)
            ...     return df.to_dict(as_series=False)

            We can then pass either pandas, Polars or PyArrow to `agnostic_to_dict`:

            >>> agnostic_to_dict(df_pd)
            {'A': [1, 2, 3, 4, 5], 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'], 'B': [5, 4, 3, 2, 1], 'animals': ['beetle', 'fly', 'beetle', 'beetle', 'beetle'], 'optional': [28.0, 300.0, nan, 2.0, -30.0]}
            >>> agnostic_to_dict(df_pl)
            {'A': [1, 2, 3, 4, 5], 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'], 'B': [5, 4, 3, 2, 1], 'animals': ['beetle', 'fly', 'beetle', 'beetle', 'beetle'], 'optional': [28, 300, None, 2, -30]}
            >>> agnostic_to_dict(df_pa)
            {'A': [1, 2, 3, 4, 5], 'fruits': ['banana', 'banana', 'apple', 'apple', 'banana'], 'B': [5, 4, 3, 2, 1], 'animals': ['beetle', 'fly', 'beetle', 'beetle', 'beetle'], 'optional': [28, 300, None, 2, -30]}
        r   )r}   r   )r^   r   r~   s     r_   r   zDataFrame.to_dict   s    \ w33ra   c                     t         |          S )a  Get a mask of all duplicated rows in this DataFrame.

        Returns:
            A new Series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoDataFrame
            >>> from narwhals.typing import IntoSeries
            >>> data = {
            ...     "a": [1, 2, 3, 1],
            ...     "b": ["x", "y", "z", "x"],
            ... }
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            Let's define a dataframe-agnostic function:

            >>> def agnostic_is_duplicated(df_native: IntoDataFrame) -> IntoSeries:
            ...     df = nw.from_native(df_native, eager_only=True)
            ...     return df.is_duplicated().to_native()

            We can then pass any supported library such as Pandas, Polars, or PyArrow
            to `agnostic_is_duplicated`:

            >>> agnostic_is_duplicated(df_pd)
            0     True
            1    False
            2    False
            3     True
            dtype: bool

            >>> agnostic_is_duplicated(df_pl)  # doctest: +NORMALIZE_WHITESPACE
            shape: (4,)
            Series: '' [bool]
            [
                true
                false
                false
                true
            ]
            >>> agnostic_is_duplicated(df_pa)  # doctest: +ELLIPSIS
            <pyarrow.lib.ChunkedArray object at ...>
            [
              [
                true,
                false,
                false,
                true
              ]
            ]
        )r}   is_duplicatedr   s    r_   r   zDataFrame.is_duplicated  s    r w$&&ra   c                     t         |          S )a  Get a mask of all unique rows in this DataFrame.

        Returns:
            A new Series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoDataFrame
            >>> from narwhals.typing import IntoSeries
            >>> data = {
            ...     "a": [1, 2, 3, 1],
            ...     "b": ["x", "y", "z", "x"],
            ... }
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            Let's define a dataframe-agnostic function:

            >>> def agnostic_is_unique(df_native: IntoDataFrame) -> IntoSeries:
            ...     df = nw.from_native(df_native, eager_only=True)
            ...     return df.is_unique().to_native()

            We can then pass any supported library such as Pandas, Polars, or PyArrow
            to `agnostic_is_unique`:

            >>> agnostic_is_unique(df_pd)
            0    False
            1     True
            2     True
            3    False
            dtype: bool

            >>> agnostic_is_unique(df_pl)  # doctest: +NORMALIZE_WHITESPACE
            shape: (4,)
            Series: '' [bool]
            [
                false
                 true
                 true
                false
            ]
            >>> agnostic_is_unique(df_pa)  # doctest: +ELLIPSIS
            <pyarrow.lib.ChunkedArray object at ...>
            [
              [
                false,
                true,
                true,
                false
              ]
            ]
        )r}   	is_uniquer   s    r_   r   zDataFrame.is_uniqueK  s    r w ""ra   c                P    | j                  t               j                               S )zbPrivate, just used to test the stable API.

        Returns:
            A new DataFrame.
        selectall_l1_normr]   s    r_   r   zDataFrame._l1_norm       {{35>>+,,ra   )returnztype[Series])r   ztype[LazyFrame[Any]])rh   ztuple[Sequence[int], slice]r   rT   )rh   z#tuple[Sequence[int], Sequence[int]]r   rT   )rh   ztuple[slice, Sequence[int]]r   rT   )rh   ztuple[Sequence[int], str]r   r&   )rh   ztuple[slice, str]r   r&   )rh   z#tuple[Sequence[int], Sequence[str]]r   rT   )rh   ztuple[slice, Sequence[str]]r   rT   )rh   ztuple[Sequence[int], int]r   r&   )rh   ztuple[slice, int]r   r&   )rh   zSequence[int]r   rT   )rh   strr   r&   )rh   zSequence[str]r   rT   )rh   slicer   rT   )rh   ztuple[slice, slice]r   rT   )rh   r   r   r   )r   LazyFrame[Any])r   Literal[True]r   zdict[str, Series])r   Literal[False]r   zdict[str, list[Any]])r   boolr   z(dict[str, Series] | dict[str, list[Any]])r^   rT   r   r&   r^   rT   r   rT   )__name__
__module____qualname____doc__propertyr`   rc   r   ri   r   r   r   r   r   __classcell__r~   s   @r_   r   r   ^   sZ   6     I IQ QI II IA AQ QI II IA A; ;3 3; ;3 3A A)/f 47R RP PZ Z#'.4 .4	1.4`9'v9#v-ra   r   c                  >     e Zd ZdZedd       Zd fdZddZ xZS )r   a  Narwhals LazyFrame, backed by a native lazyframe.

    !!! warning
        This class is not meant to be instantiated directly - instead use
        [`narwhals.from_native`][] with a native
        object that is a lazy dataframe from one of the supported
        backends (e.g. polars.LazyFrame, dask_expr._collection.DataFrame):
        ```py
        narwhals.from_native(native_lazyframe)
        ```
    c                    t         S r\   r   r]   s    r_   
_dataframezLazyFrame._dataframe  rd   ra   c                     t         |          S )u	  Materialize this LazyFrame into a DataFrame.

        Returns:
            DataFrame

        Examples:
            >>> import narwhals as nw
            >>> import polars as pl
            >>> import dask.dataframe as dd
            >>> data = {
            ...     "a": ["a", "b", "a", "b", "b", "c"],
            ...     "b": [1, 2, 3, 4, 5, 6],
            ...     "c": [6, 5, 4, 3, 2, 1],
            ... }
            >>> lf_pl = pl.LazyFrame(data)
            >>> lf_dask = dd.from_dict(data, npartitions=2)

            >>> lf = nw.from_native(lf_pl)
            >>> lf  # doctest:+ELLIPSIS
            ┌─────────────────────────────┐
            |     Narwhals LazyFrame      |
            |-----------------------------|
            |<LazyFrame at ...
            └─────────────────────────────┘
            >>> df = lf.group_by("a").agg(nw.all().sum()).collect()
            >>> df.to_native().sort("a")
            shape: (3, 3)
            ┌─────┬─────┬─────┐
            │ a   ┆ b   ┆ c   │
            │ --- ┆ --- ┆ --- │
            │ str ┆ i64 ┆ i64 │
            ╞═════╪═════╪═════╡
            │ a   ┆ 4   ┆ 10  │
            │ b   ┆ 11  ┆ 10  │
            │ c   ┆ 6   ┆ 1   │
            └─────┴─────┴─────┘

            >>> lf = nw.from_native(lf_dask)
            >>> lf
            ┌───────────────────────────────────┐
            |        Narwhals LazyFrame         |
            |-----------------------------------|
            |Dask DataFrame Structure:          |
            |                    a      b      c|
            |npartitions=2                      |
            |0              string  int64  int64|
            |3                 ...    ...    ...|
            |5                 ...    ...    ...|
            |Dask Name: frompandas, 1 expression|
            |Expr=df                            |
            └───────────────────────────────────┘
            >>> df = lf.group_by("a").agg(nw.col("b", "c").sum()).collect()
            >>> df.to_native()
               a   b   c
            0  a   4  10
            1  b  11  10
            2  c   6   1
        )r}   collectr   s    r_   r   zLazyFrame.collect  s    v w  ra   c                P    | j                  t               j                               S )zbPrivate, just used to test the stable API.

        Returns:
            A new lazyframe.
        r   r]   s    r_   r   zLazyFrame._l1_norm  r   ra   r   ztype[DataFrame[Any]]r   DataFrame[Any]r   )	r   r   r   r   r   r   r   r   r   r   s   @r_   r   r     s'    
  ;!z-ra   r   c                  H    e Zd ZdZedd       Zd fdZddddd	 	 	 	 	 	 	 	 	 	 	 d fdZdddddd	dd
	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d fdZddd	 	 	 	 	 	 	 	 	 d fdZ	ddd	 	 	 	 	 	 	 	 	 d fdZ
ddd	d	 	 	 	 	 	 	 	 	 	 	 d fdZddd	d	 	 	 	 	 	 	 	 	 	 	 d fdZ xZS )r&   ai  Narwhals Series, backed by a native series.

    !!! warning
        This class is not meant to be instantiated directly - instead:

        - If the native object is a series from one of the supported backends (e.g.
            pandas.Series, polars.Series, pyarrow.ChunkedArray), you can use
            [`narwhals.from_native`][]:
            ```py
            narwhals.from_native(native_series, allow_series=True)
            narwhals.from_native(native_series, series_only=True)
            ```

        - If the object is a generic sequence (e.g. a list or a tuple of values), you can
            create a series via [`narwhals.new_series`][]:
            ```py
            narwhals.new_series(
                name=name,
                values=values,
                native_namespace=narwhals.get_native_namespace(another_object),
            )
            ```
    c                    t         S r\   r   r]   s    r_   r   zSeries._dataframe  rd   ra   c                     t         |          S )u  Convert to dataframe.

        Returns:
            A DataFrame containing this Series as a single column.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoDataFrame
            >>> from narwhals.typing import IntoSeries

            >>> data = [1, 2]
            >>> s_pd = pd.Series(data, name="a")
            >>> s_pl = pl.Series("a", data)
            >>> s_pa = pa.chunked_array([data])

            We define a library agnostic function:

            >>> def agnostic_to_frame(s_native: IntoSeries) -> IntoDataFrame:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.to_frame().to_native()

            We can then pass any supported library such as pandas, Polars, or
            PyArrow to `agnostic_to_frame`:

            >>> agnostic_to_frame(s_pd)
               a
            0  1
            1  2

            >>> agnostic_to_frame(s_pl)
            shape: (2, 1)
            ┌─────┐
            │ a   │
            │ --- │
            │ i64 │
            ╞═════╡
            │ 1   │
            │ 2   │
            └─────┘

            >>> agnostic_to_frame(s_pa)
            pyarrow.Table
            : int64
            ----
            : [[1,2]]
        )r}   to_framer   s    r_   r   zSeries.to_frame  s    d w!!ra   FNsortparallelname	normalizec               *    t         |   ||||      S )uq	  Count the occurrences of unique values.

        Arguments:
            sort: Sort the output by count in descending order. If set to False (default),
                the order of the output is random.
            parallel: Execute the computation in parallel. Used for Polars only.
            name: Give the resulting count column a specific name; if `normalize` is True
                defaults to "proportion", otherwise defaults to "count".
            normalize: If True, gives the relative frequencies of the unique values instead of counts.

        Returns:
            A DataFrame with two columns:
            - The original values as first column
            - Either count or proportion as second column, depending on normalize parameter.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoDataFrame
            >>> from narwhals.typing import IntoSeries

            >>> data = [1, 1, 2, 3, 2]
            >>> s_pd = pd.Series(data, name="s")
            >>> s_pl = pl.Series(values=data, name="s")
            >>> s_pa = pa.chunked_array([data])

            Let's define a dataframe-agnostic function:

            >>> def agnostic_value_counts(s_native: IntoSeries) -> IntoDataFrame:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.value_counts(sort=True).to_native()

            We can then pass any supported library such as pandas, Polars, or
            PyArrow to `agnostic_value_counts`:

            >>> agnostic_value_counts(s_pd)
               s  count
            0  1      2
            1  2      2
            2  3      1

            >>> agnostic_value_counts(s_pl)  # doctest: +NORMALIZE_WHITESPACE
            shape: (3, 2)
            ┌─────┬───────┐
            │ s   ┆ count │
            │ --- ┆ ---   │
            │ i64 ┆ u32   │
            ╞═════╪═══════╡
            │ 1   ┆ 2     │
            │ 2   ┆ 2     │
            │ 3   ┆ 1     │
            └─────┴───────┘

            >>> agnostic_value_counts(s_pa)
            pyarrow.Table
            : int64
            count: int64
            ----
            : [[1,2,3]]
            count: [[2,2,1]]
        r   )r}   value_counts)r^   r   r   r   r   r~   s        r_   r   zSeries.value_counts:  s&    N w#ty $ 
 	
ra   T   comspan	half_lifealphaadjustmin_periodsignore_nullsc          	     r    ddl m} ddlm}	 d}
t	        |
| |	              t
        |   |||||||      S )a  Compute exponentially-weighted moving average.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        Arguments:
            com: Specify decay in terms of center of mass, $\gamma$, with <br> $\alpha = \frac{1}{1+\gamma}\forall\gamma\geq0$
            span: Specify decay in terms of span, $\theta$, with <br> $\alpha = \frac{2}{\theta + 1} \forall \theta \geq 1$
            half_life: Specify decay in terms of half-life, $\tau$, with <br> $\alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \tau } \right\} \forall \tau > 0$
            alpha: Specify smoothing factor alpha directly, $0 < \alpha \leq 1$.
            adjust: Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings

                - When `adjust=True` (the default) the EW function is calculated
                  using weights $w_i = (1 - \alpha)^i$
                - When `adjust=False` the EW function is calculated recursively by
                  $$
                  y_0=x_0
                  $$
                  $$
                  y_t = (1 - \alpha)y_{t - 1} + \alpha x_t
                  $$
            min_periods: Minimum number of observations in window required to have a value (otherwise result is null).
            ignore_nulls: Ignore missing values when calculating weights.

                - When `ignore_nulls=False` (default), weights are based on absolute
                  positions.
                  For example, the weights of $x_0$ and $x_2$ used in
                  calculating the final weighted average of $[x_0, None, x_2]$ are
                  $(1-\alpha)^2$ and $1$ if `adjust=True`, and
                  $(1-\alpha)^2$ and $\alpha$ if `adjust=False`.
                - When `ignore_nulls=True`, weights are based
                  on relative positions. For example, the weights of
                  $x_0$ and $x_2$ used in calculating the final weighted
                  average of $[x_0, None, x_2]$ are
                  $1-\alpha$ and $1$ if `adjust=True`,
                  and $1-\alpha$ and $\alpha$ if `adjust=False`.

        Returns:
            Series

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoSeriesT

            >>> data = [1, 2, 3]
            >>> s_pd = pd.Series(name="a", data=data)
            >>> s_pl = pl.Series(name="a", values=data)

            We define a library agnostic function:

            >>> def agnostic_ewm_mean(s_native: IntoSeriesT) -> IntoSeriesT:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.ewm_mean(com=1, ignore_nulls=False).to_native()

            We can then pass any supported library such as pandas or Polars
            to `agnostic_ewm_mean`:

            >>> agnostic_ewm_mean(s_pd)
            0    1.000000
            1    1.666667
            2    2.428571
            Name: a, dtype: float64

            >>> agnostic_ewm_mean(s_pl)  # doctest: +NORMALIZE_WHITESPACE
            shape: (3,)
            Series: 'a' [f64]
            [
               1.0
               1.666667
               2.428571
            ]
        r   NarwhalsUnstableWarningfind_stacklevelz^`Series.ewm_mean` is being called from the stable API although considered an unstable feature.messagecategory
stacklevelr   narwhals.exceptionsr   narwhals.utilsr   r   r}   ewm_meanr^   r   r   r   r   r   r   r   r   r   msgr~   s              r_   r   zSeries.ewm_mean  sU    l 	@2# 	 	S#:GXYw#%   
 	
ra   r   centerc               j    ddl m} ddlm} d}t	        || |              t
        |   |||      S )a	  Apply a rolling sum (moving sum) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their sum.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`
            center: Set the labels at the center of the window.

        Returns:
            A new series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoSeriesT

            >>> data = [1.0, 2.0, 3.0, 4.0]
            >>> s_pd = pd.Series(data)
            >>> s_pl = pl.Series(data)
            >>> s_pa = pa.chunked_array([data])

            We define a library agnostic function:

            >>> def agnostic_rolling_sum(s_native: IntoSeriesT) -> IntoSeriesT:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.rolling_sum(window_size=2).to_native()

            We can then pass any supported library such as pandas, Polars, or
            PyArrow to `agnostic_rolling_sum`:

            >>> agnostic_rolling_sum(s_pd)
            0    NaN
            1    3.0
            2    5.0
            3    7.0
            dtype: float64

            >>> agnostic_rolling_sum(s_pl)  # doctest:+NORMALIZE_WHITESPACE
            shape: (4,)
            Series: '' [f64]
            [
               null
               3.0
               5.0
               7.0
            ]

            >>> agnostic_rolling_sum(s_pa)  # doctest:+ELLIPSIS
            <pyarrow.lib.ChunkedArray object at ...>
            [
              [
                null,
                3,
                5,
                7
              ]
            ]
        r   r   r   za`Series.rolling_sum` is being called from the stable API although considered an unstable feature.r   window_sizer   r   r   r   r   r   r   r}   rolling_sumr^   r   r   r   r   r   r   r~   s          r_   r   zSeries.rolling_sum  sI    ` 	@2# 	 	S#:GXYw"## # 
 	
ra   c               j    ddl m} ddlm} d}t	        || |              t
        |   |||      S )a	  Apply a rolling mean (moving mean) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their mean.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`
            center: Set the labels at the center of the window.

        Returns:
            A new series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoSeriesT

            >>> data = [1.0, 2.0, 3.0, 4.0]
            >>> s_pd = pd.Series(data)
            >>> s_pl = pl.Series(data)
            >>> s_pa = pa.chunked_array([data])

            We define a library agnostic function:

            >>> def agnostic_rolling_mean(s_native: IntoSeriesT) -> IntoSeriesT:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.rolling_mean(window_size=2).to_native()

            We can then pass any supported library such as pandas, Polars, or
            PyArrow to `agnostic_rolling_mean`:

            >>> agnostic_rolling_mean(s_pd)
            0    NaN
            1    1.5
            2    2.5
            3    3.5
            dtype: float64

            >>> agnostic_rolling_mean(s_pl)  # doctest:+NORMALIZE_WHITESPACE
            shape: (4,)
            Series: '' [f64]
            [
               null
               1.5
               2.5
               3.5
            ]

            >>> agnostic_rolling_mean(s_pa)  # doctest:+ELLIPSIS
            <pyarrow.lib.ChunkedArray object at ...>
            [
              [
                null,
                1.5,
                2.5,
                3.5
              ]
            ]
        r   r   r   zb`Series.rolling_mean` is being called from the stable API although considered an unstable feature.r   r   r   r   r   r   r   r}   rolling_meanr   s          r_   r   zSeries.rolling_meanK  sI    ` 	@2# 	 	S#:GXYw### $ 
 	
ra   r   r   ddofc               l    ddl m} ddlm} d}t	        || |              t
        |   ||||      S )a/
  Apply a rolling variance (moving variance) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their variance.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`.
            center: Set the labels at the center of the window.
            ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.

        Returns:
            A new series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoSeriesT

            >>> data = [1.0, 3.0, 1.0, 4.0]
            >>> s_pd = pd.Series(data)
            >>> s_pl = pl.Series(data)
            >>> s_pa = pa.chunked_array([data])

            We define a library agnostic function:

            >>> def agnostic_rolling_var(s_native: IntoSeriesT) -> IntoSeriesT:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.rolling_var(window_size=2, min_periods=1).to_native()

            We can then pass any supported library such as pandas, Polars, or
            PyArrow to `agnostic_rolling_var`:

            >>> agnostic_rolling_var(s_pd)
            0    NaN
            1    2.0
            2    2.0
            3    4.5
            dtype: float64

            >>> agnostic_rolling_var(s_pl)  # doctest:+NORMALIZE_WHITESPACE
            shape: (4,)
            Series: '' [f64]
            [
               null
               2.0
               2.0
               4.5
            ]

            >>> agnostic_rolling_var(s_pa)  # doctest:+ELLIPSIS
            <pyarrow.lib.ChunkedArray object at ...>
            [
              [
                nan,
                2,
                2,
                4.5
              ]
            ]
        r   r   r   za`Series.rolling_var` is being called from the stable API although considered an unstable feature.r   r   r   r   r   r   r   r   r   r   r}   rolling_var	r^   r   r   r   r   r   r   r   r~   s	           r_   r   zSeries.rolling_var  L    d 	@2# 	 	S#:GXYw"##	 # 
 	
ra   c               l    ddl m} ddlm} d}t	        || |              t
        |   ||||      S )a
  Apply a rolling standard deviation (moving standard deviation) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their standard deviation.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`.
            center: Set the labels at the center of the window.
            ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.

        Returns:
            A new series.

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoSeriesT

            >>> data = [1.0, 3.0, 1.0, 4.0]
            >>> s_pd = pd.Series(data)
            >>> s_pl = pl.Series(data)
            >>> s_pa = pa.chunked_array([data])

            We define a library agnostic function:

            >>> def agnostic_rolling_std(s_native: IntoSeriesT) -> IntoSeriesT:
            ...     s = nw.from_native(s_native, series_only=True)
            ...     return s.rolling_std(window_size=2, min_periods=1).to_native()

            We can then pass any supported library such as pandas, Polars, or
            PyArrow to `agnostic_rolling_std`:

            >>> agnostic_rolling_std(s_pd)
            0         NaN
            1    1.414214
            2    1.414214
            3    2.121320
            dtype: float64

            >>> agnostic_rolling_std(s_pl)  # doctest:+NORMALIZE_WHITESPACE
            shape: (4,)
            Series: '' [f64]
            [
               null
               1.414214
               1.414214
               2.12132
            ]

            >>> agnostic_rolling_std(s_pa)  # doctest:+ELLIPSIS
            <pyarrow.lib.ChunkedArray object at ...>
            [
              [
                nan,
                1.4142135623730951,
                1.4142135623730951,
                2.1213203435596424
              ]
            ]
        r   r   r   za`Series.rolling_std` is being called from the stable API although considered an unstable feature.r   r   r   r   r   r   r   r}   rolling_stdr   s	           r_   r   zSeries.rolling_std
  r   ra   r   r   )r^   rT   r   r   r   r   r   z
str | Noner   r   r   r   r^   rT   r   float | Noner   r   r   r   r   r   r   r   r   intr   r   r   rT   
r^   rT   r   r   r   
int | Noner   r   r   rT   r^   rT   r   r   r   r   r   r   r   r   r   rT   )r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   s   @r_   r&   r&     s   6  2"n I
I
 I
 	I

 I
 I
 
I
\ !!"&""f
f
 f
 	f

  f
 f
 f
 f
 f
 
f
X #'\
\
\
  	\

 \
 
\
D #'\
\
\
  	\

 \
 
\
D #'_
_
_
  	_

 _
 _
 
_
J #'_
_
_
  	_

 _
 _
 
_
 _
ra   r&   c                      e Zd Zd fdZdddddddd	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d fdZddd	 	 	 	 	 	 	 	 	 d fd	Zddd	 	 	 	 	 	 	 	 	 d fd
Zdddd	 	 	 	 	 	 	 	 	 	 	 d fdZdddd	 	 	 	 	 	 	 	 	 	 	 d fdZ xZ	S )r   c                     t         |          S r\   )r}   _taxicab_normr   s    r_   r   zExpr._l1_normm  s    w$&&ra   NTr   Fr   c          	     r    ddl m} ddlm}	 d}
t	        |
| |	              t
        |   |||||||      S )u  Compute exponentially-weighted moving average.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        Arguments:
            com: Specify decay in terms of center of mass, $\gamma$, with <br> $\alpha = \frac{1}{1+\gamma}\forall\gamma\geq0$
            span: Specify decay in terms of span, $\theta$, with <br> $\alpha = \frac{2}{\theta + 1} \forall \theta \geq 1$
            half_life: Specify decay in terms of half-life, $\tau$, with <br> $\alpha = 1 - \exp \left\{ \frac{ -\ln(2) }{ \tau } \right\} \forall \tau > 0$
            alpha: Specify smoothing factor alpha directly, $0 < \alpha \leq 1$.
            adjust: Divide by decaying adjustment factor in beginning periods to account for imbalance in relative weightings

                - When `adjust=True` (the default) the EW function is calculated
                  using weights $w_i = (1 - \alpha)^i$
                - When `adjust=False` the EW function is calculated recursively by
                  $$
                  y_0=x_0
                  $$
                  $$
                  y_t = (1 - \alpha)y_{t - 1} + \alpha x_t
                  $$
            min_periods: Minimum number of observations in window required to have a value (otherwise result is null).
            ignore_nulls: Ignore missing values when calculating weights.

                - When `ignore_nulls=False` (default), weights are based on absolute
                  positions.
                  For example, the weights of $x_0$ and $x_2$ used in
                  calculating the final weighted average of $[x_0, None, x_2]$ are
                  $(1-\alpha)^2$ and $1$ if `adjust=True`, and
                  $(1-\alpha)^2$ and $\alpha$ if `adjust=False`.
                - When `ignore_nulls=True`, weights are based
                  on relative positions. For example, the weights of
                  $x_0$ and $x_2$ used in calculating the final weighted
                  average of $[x_0, None, x_2]$ are
                  $1-\alpha$ and $1$ if `adjust=True`,
                  and $1-\alpha$ and $\alpha$ if `adjust=False`.

        Returns:
            Expr

        Examples:
            >>> import pandas as pd
            >>> import polars as pl
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoFrameT
            >>> data = {"a": [1, 2, 3]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)

            We define a library agnostic function:

            >>> def my_library_agnostic_function(df_native: IntoFrameT) -> IntoFrameT:
            ...     df = nw.from_native(df_native)
            ...     return df.select(
            ...         nw.col("a").ewm_mean(com=1, ignore_nulls=False)
            ...     ).to_native()

            We can then pass either pandas or Polars to `func`:

            >>> my_library_agnostic_function(df_pd)
                      a
            0  1.000000
            1  1.666667
            2  2.428571

            >>> my_library_agnostic_function(df_pl)  # doctest: +NORMALIZE_WHITESPACE
            shape: (3, 1)
            ┌──────────┐
            │ a        │
            │ ---      │
            │ f64      │
            ╞══════════╡
            │ 1.0      │
            │ 1.666667 │
            │ 2.428571 │
            └──────────┘
        r   r   r   z\`Expr.ewm_mean` is being called from the stable API although considered an unstable feature.r   r   r   r   s              r_   r   zExpr.ewm_meanp  sU    r 	@2# 	 	S#:GXYw#%   
 	
ra   r   c               j    ddl m} ddlm} d}t	        || |              t
        |   |||      S )u7
  Apply a rolling sum (moving sum) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their sum.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`
            center: Set the labels at the center of the window.

        Returns:
            A new expression.

        Examples:
            >>> import narwhals as nw
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> data = {"a": [1.0, 2.0, None, 4.0]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            We define a library agnostic function:

            >>> @nw.narwhalify
            ... def agnostic_rolling_sum(df):
            ...     return df.with_columns(
            ...         b=nw.col("a").rolling_sum(window_size=3, min_periods=1)
            ...     )

            We can then pass any supported library such as Pandas, Polars, or PyArrow to `func`:

            >>> agnostic_rolling_sum(df_pd)
                 a    b
            0  1.0  1.0
            1  2.0  3.0
            2  NaN  3.0
            3  4.0  6.0

            >>> agnostic_rolling_sum(df_pl)
            shape: (4, 2)
            ┌──────┬─────┐
            │ a    ┆ b   │
            │ ---  ┆ --- │
            │ f64  ┆ f64 │
            ╞══════╪═════╡
            │ 1.0  ┆ 1.0 │
            │ 2.0  ┆ 3.0 │
            │ null ┆ 3.0 │
            │ 4.0  ┆ 6.0 │
            └──────┴─────┘

            >>> agnostic_rolling_sum(df_pa)  #  doctest:+ELLIPSIS
            pyarrow.Table
            a: double
            b: double
            ----
            a: [[1,2,null,4]]
            b: [[1,3,3,6]]
        r   r   r   z_`Expr.rolling_sum` is being called from the stable API although considered an unstable feature.r   r   r   r   s          r_   r   zExpr.rolling_sum  sI    ^ 	@2# 	 	S#:GXYw"## # 
 	
ra   c               j    ddl m} ddlm} d}t	        || |              t
        |   |||      S )uC
  Apply a rolling mean (moving mean) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their mean.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`
            center: Set the labels at the center of the window.

        Returns:
            A new expression.

        Examples:
            >>> import narwhals as nw
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> data = {"a": [1.0, 2.0, None, 4.0]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            We define a library agnostic function:

            >>> @nw.narwhalify
            ... def agnostic_rolling_mean(df):
            ...     return df.with_columns(
            ...         b=nw.col("a").rolling_mean(window_size=3, min_periods=1)
            ...     )

            We can then pass any supported library such as Pandas, Polars, or PyArrow to `func`:

            >>> agnostic_rolling_mean(df_pd)
                 a    b
            0  1.0  1.0
            1  2.0  1.5
            2  NaN  1.5
            3  4.0  3.0

            >>> agnostic_rolling_mean(df_pl)
            shape: (4, 2)
            ┌──────┬─────┐
            │ a    ┆ b   │
            │ ---  ┆ --- │
            │ f64  ┆ f64 │
            ╞══════╪═════╡
            │ 1.0  ┆ 1.0 │
            │ 2.0  ┆ 1.5 │
            │ null ┆ 1.5 │
            │ 4.0  ┆ 3.0 │
            └──────┴─────┘

            >>> agnostic_rolling_mean(df_pa)  #  doctest:+ELLIPSIS
            pyarrow.Table
            a: double
            b: double
            ----
            a: [[1,2,null,4]]
            b: [[1,1.5,1.5,3]]
        r   r   r   z``Expr.rolling_mean` is being called from the stable API although considered an unstable feature.r   r   r   r   s          r_   r   zExpr.rolling_mean8  sI    ^ 	@2# 	 	S#:GXYw### $ 
 	
ra   r   c               l    ddl m} ddlm} d}t	        || |              t
        |   ||||      S )uB  Apply a rolling variance (moving variance) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their variance.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`.
            center: Set the labels at the center of the window.
            ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.

        Returns:
            A new expression.

        Examples:
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoFrameT
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> data = {"a": [1.0, 2.0, None, 4.0]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            We define a library agnostic function:

            >>> def agnostic_rolling_var(df_native: IntoFrameT) -> IntoFrameT:
            ...     df = nw.from_native(df_native)
            ...     return df.with_columns(
            ...         b=nw.col("a").rolling_var(window_size=3, min_periods=1)
            ...     ).to_native()

            We can then pass any supported library such as Pandas, Polars, or PyArrow to `func`:

            >>> agnostic_rolling_var(df_pd)
                 a    b
            0  1.0  NaN
            1  2.0  0.5
            2  NaN  0.5
            3  4.0  2.0

            >>> agnostic_rolling_var(df_pl)  #  doctest:+SKIP
            shape: (4, 2)
            ┌──────┬──────┐
            │ a    ┆ b    │
            │ ---  ┆ ---  │
            │ f64  ┆ f64  │
            ╞══════╪══════╡
            │ 1.0  ┆ null │
            │ 2.0  ┆ 0.5  │
            │ null ┆ 0.5  │
            │ 4.0  ┆ 2.0  │
            └──────┴──────┘

            >>> agnostic_rolling_var(df_pa)  #  doctest:+ELLIPSIS
            pyarrow.Table
            a: double
            b: double
            ----
            a: [[1,2,null,4]]
            b: [[nan,0.5,0.5,2]]
        r   r   r   z_`Expr.rolling_var` is being called from the stable API although considered an unstable feature.r   r   r   r   s	           r_   r   zExpr.rolling_var  sJ    d 	@2# 	 	S#:GXYw"#VRV # 
 	
ra   c               l    ddl m} ddlm} d}t	        || |              t
        |   ||||      S )u  Apply a rolling standard deviation (moving standard deviation) over the values.

        !!! warning
            This functionality is considered **unstable**. It may be changed at any point
            without it being considered a breaking change.

        A window of length `window_size` will traverse the values. The resulting values
        will be aggregated to their standard deviation.

        The window at a given row will include the row itself and the `window_size - 1`
        elements before it.

        Arguments:
            window_size: The length of the window in number of elements. It must be a
                strictly positive integer.
            min_periods: The number of values in the window that should be non-null before
                computing a result. If set to `None` (default), it will be set equal to
                `window_size`. If provided, it must be a strictly positive integer, and
                less than or equal to `window_size`
            center: Set the labels at the center of the window.
            ddof: Delta Degrees of Freedom; the divisor for a length N window is N - ddof.

        Returns:
            A new expression.

        Examples:
            >>> import narwhals as nw
            >>> from narwhals.typing import IntoFrameT
            >>> import pandas as pd
            >>> import polars as pl
            >>> import pyarrow as pa
            >>> data = {"a": [1.0, 2.0, None, 4.0]}
            >>> df_pd = pd.DataFrame(data)
            >>> df_pl = pl.DataFrame(data)
            >>> df_pa = pa.table(data)

            We define a library-agnostic function:

            >>> def agnostic_rolling_std(df_native: IntoFrameT) -> IntoFrameT:
            ...     df = nw.from_native(df_native)
            ...     return df.with_columns(
            ...         b=nw.col("a").rolling_std(window_size=3, min_periods=1)
            ...     ).to_native()

            We can then pass any supported library such as Pandas, Polars, or PyArrow to `agnostic_rolling_std`:

            >>> agnostic_rolling_std(df_pd)
                 a         b
            0  1.0       NaN
            1  2.0  0.707107
            2  NaN  0.707107
            3  4.0  1.414214

            >>> agnostic_rolling_std(df_pl)  #  doctest:+SKIP
            shape: (4, 2)
            ┌──────┬──────────┐
            │ a    ┆ b        │
            │ ---  ┆ ---      │
            │ f64  ┆ f64      │
            ╞══════╪══════════╡
            │ 1.0  ┆ null     │
            │ 2.0  ┆ 0.707107 │
            │ null ┆ 0.707107 │
            │ 4.0  ┆ 1.414214 │
            └──────┴──────────┘

            >>> agnostic_rolling_std(df_pa)  #  doctest:+ELLIPSIS
            pyarrow.Table
            a: double
            b: double
            ----
            a: [[1,2,null,4]]
            b: [[nan,0.7071067811865476,0.7071067811865476,1.4142135623730951]]
        """
        from narwhals.exceptions import NarwhalsUnstableWarning
        from narwhals.utils import find_stacklevel

        msg = (
            "`Expr.rolling_std` is being called from the stable API although "
            "considered an unstable feature."
        )
        warn(message=msg, category=NarwhalsUnstableWarning, stacklevel=find_stacklevel())
        return super().rolling_std(
            window_size=window_size, min_periods=min_periods, center=center, ddof=ddof
        )


class Schema(NwSchema):
    """Ordered mapping of column names to their data type.

    Arguments:
        schema: Mapping[str, DType] | Iterable[tuple[str, DType]] | None
            The schema definition given by column names and their associated
            *instantiated* Narwhals data type. Accepts a mapping or an iterable of tuples.

    Examples:
        Define a schema by passing *instantiated* data types.

        >>> import narwhals as nw
        >>> schema = nw.Schema({"foo": nw.Int8(), "bar": nw.String()})
        >>> schema
        Schema({'foo': Int8, 'bar': String})

        Access the data type associated with a specific column name.

        >>> schema["foo"]
        Int8

        Access various schema properties using the `names`, `dtypes`, and `len` methods.

        >>> schema.names()
        ['foo', 'bar']
        >>> schema.dtypes()
        [Int8, String]
        >>> schema.len()
        2
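
        A schema can also be passed when constructing a dataframe, for example via
        `nw.from_dict` (a minimal sketch, assuming pandas is available):

        >>> import pandas as pd
        >>> nw.from_dict(
        ...     {"foo": [1, 2]}, schema=nw.Schema({"foo": nw.Int8()}), native_namespace=pd
        ... ).schema
        Schema({'foo': Int8})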
    N)r   r   r   r   rf   ra   r_   r$   r$   U  s    ra   r$   c                     y r\   rf   objs    r_   
_stableifyr   u      GJra   c                     y r\   rf   r   s    r_   r   r   w  r   ra   c                     y r\   rf   r   s    r_   r   r   y  s    .1ra   c                     y r\   rf   r   s    r_   r   r   {  s    %(ra   c                     y r\   rf   r   s    r_   r   r   }  s    !$ra   c                $   t        | t              r>t        | j                  j	                  t
        j                        | j                        S t        | t              r>t        | j                  j	                  t
        j                        | j                        S t        | t              r>t        | j                  j	                  t
        j                        | j                        S t        | t              rt        | j                        S | S )N)level)
isinstanceNwDataFramer   _compliant_frame_change_versionrI   V1_levelNwLazyFramer   NwSeriesr&   _compliant_seriesNwExprr   _to_compliant_exprr   s    r_   r   r     s     #{#  00<**
 	
 #{#  00<**
 	
 #x !!11'**=**
 	
 #vC**++Jra   .)
eager_onlyseries_onlyc                    y r\   rf   native_objectstrictr  eager_or_interchange_onlyr  allow_seriess         r_   from_nativer         *-ra   )r  r  c                    y r\   rf   r  s         r_   r  r    r  ra   )r  r  r  c                    y r\   rf   r  s         r_   r  r         !$ra   c                    y r\   rf   r  s         r_   r  r         	ra   )r  r  r  c                    y r\   rf   r  s         r_   r  r    r  ra   c                    y r\   rf   r  s         r_   r  r    r  ra   )r  r  r  c                    y r\   rf   r  s         r_   r  r         >Ara   )r  r  r  c                    y r\   rf   r  s         r_   r  r         ra   )r  r  r  r  c                    y r\   rf   r  s         r_   r  r         58ra   c                    y r\   rf   r  s         r_   r  r    r  ra   )r  r  r  r  c                    y r\   rf   r  s         r_   r  r    r  ra   )r  r  r  r  c                    y r\   rf   r  s         r_   r  r    r  ra   )r  r  r  r  c                    y r\   rf   r  s         r_   r  r  (       03ra   )r  r  r  r  c                    y r\   rf   r  s         r_   r  r  4  r  ra   )r  r  r  r  r  c                    y r\   rf   r  s         r_   r  r  @  r!  ra   c                    y r\   rf   r  pass_throughr  r  r  r  s         r_   r  r  L  r  ra   c                    y r\   rf   r*  s         r_   r  r  X  r  ra   c                    y r\   rf   r*  s         r_   r  r  d  r  ra   c                    y r\   rf   r*  s         r_   r  r  p  r  ra   c                    y r\   rf   r*  s         r_   r  r  |  r  ra   c                    y r\   rf   r*  s         r_   r  r    r  ra   c                    y r\   rf   r*  s         r_   r  r    r  ra   c                    y r\   rf   r*  s         r_   r  r    r  ra   c                    y r\   rf   r*  s         r_   r  r    r!  ra   c                    y r\   rf   r*  s         r_   r  r    r  ra   )r+  r  r  r  c                    y r\   rf   r*  s         r_   r  r    r  ra   )r+  r  r  r  c                    y r\   rf   r*  s         r_   r  r    r  ra   )r+  r  r  r  c                    y r\   rf   r*  s         r_   r  r    r&  ra   )r+  r  r  r  c                    y r\   rf   r*  s         r_   r  r    r  ra   r+  r  r  r  r  c                    y r\   rf   r*  s         r_   r  r    r!  ra   F)r  c                    y r\   rf   r*  s         r_   r  r    s     ra   )r  r+  r  r  r  r  c          	         t        | t        t        f      r|s| S t        | t              r|s|r| S t	        ||dd      }t        | |||||t        j                        }t        |      S )a	  Convert `native_object` to Narwhals Dataframe, Lazyframe, or Series.

    Arguments:
        native_object: Raw object from user.
            Depending on the other arguments, input object can be:

            - a Dataframe / Lazyframe / Series supported by Narwhals (pandas, Polars, PyArrow, ...)
            - an object which implements `__narwhals_dataframe__`, `__narwhals_lazyframe__`,
              or `__narwhals_series__`
        strict: Determine what happens if the object can't be converted to Narwhals:

            - `True` or `None` (default): raise an error
            - `False`: pass object through as-is

            **Deprecated** (v1.13.0):
                Please use `pass_through` instead. Note that `strict` is still available
                (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,
                see [perfect backwards compatibility policy](../backcompat.md/).
        pass_through: Determine what happens if the object can't be converted to Narwhals:

            - `False` or `None` (default): raise an error
            - `True`: pass object through as-is
        eager_only: Whether to only allow eager objects:

            - `False` (default): don't require `native_object` to be eager
            - `True`: only convert to Narwhals if `native_object` is eager
        eager_or_interchange_only: Whether to only allow eager objects or objects which
            have interchange-level support in Narwhals:

            - `False` (default): don't require `native_object` to either be eager or to
              have interchange-level support in Narwhals
            - `True`: only convert to Narwhals if `native_object` is eager or has
              interchange-level support in Narwhals

            See [interchange-only support](../extending.md/#interchange-only-support)
            for more details.
        series_only: Whether to only allow Series:

            - `False` (default): don't require `native_object` to be a Series
            - `True`: only convert to Narwhals if `native_object` is a Series
        allow_series: Whether to allow Series (default is only Dataframe / Lazyframe):

            - `False` or `None` (default): don't convert to Narwhals if `native_object` is a Series
            - `True`: allow `native_object` to be a Series

    Returns:
        DataFrame, LazyFrame, Series, or original object, depending
            on which combination of parameters was passed.
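
    Examples:
        A minimal sketch, assuming pandas is installed:

        >>> import pandas as pd
        >>> import narwhals as nw
        >>> df_native = pd.DataFrame({"a": [1, 2, 3]})
        >>> df = nw.from_native(df_native)
        >>> df.columns
        ['a']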
    Fpass_through_defaultemit_deprecation_warning)r+  r  r  r  r  version)	r   r   r   r&   rR   rB   rI   r  r   )r  r  r+  r  r  r  r  results           r_   r  r    sv    x -)Y!78-(k\25SXL !";!

F fra   )r  c                    y r\   rf   narwhals_objectr  s     r_   	to_nativerE  ^       ra   c                    y r\   rf   rC  s     r_   rE  rE  b       ra   c                    y r\   rf   rC  s     r_   rE  rE  f  r   ra   c                    y r\   rf   rC  s     r_   rE  rE  h  s    =@ra   r+  c                    y r\   rf   rD  r+  s     r_   rE  rE  j  rF  ra   c                    y r\   rf   rM  s     r_   rE  rE  n  rH  ra   c                    y r\   rf   rM  s     r_   rE  rE  r  s    VYra   c                    y r\   rf   rM  s     r_   rE  rE  t  s    CFra   )r  r+  c                  ddl m} ddlm} ddlm}  |||dd      }t        | |      r| j                  j                  S t        | |      r| j                  j                  S |sdt        |        d}t        |      | S )	a<  Convert Narwhals object to native one.

    Arguments:
        narwhals_object: Narwhals object.
        strict: Determine what happens if `narwhals_object` isn't a Narwhals class:

            - `True` (default): raise an error
            - `False`: pass object through as-is

            **Deprecated** (v1.13.0):
                Please use `pass_through` instead. Note that `strict` is still available
                (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,
                see [perfect backwards compatibility policy](../backcompat.md/).
        pass_through: Determine what happens if `narwhals_object` isn't a Narwhals class:

            - `False` (default): raise an error
            - `True`: pass object through as-is

    Returns:
        Object of class that user started with.
    r   )	BaseFramer%   rQ   Fr=  zExpected Narwhals object, got .)narwhals.dataframerR  narwhals.seriesr&   r   rR   r   r  _native_framer  _native_seriestype	TypeError)rD  r  r+  rR  r&   rR   r   s          r_   rE  rE  x  s    6 -&>25SXL /9-//===/6*00???.tO/D.EQGnra   Tc               V    t        |dd      dfd}| |S  ||       S )a  Decorate function so it becomes dataframe-agnostic.

    This will try to convert any dataframe/series-like object into the Narwhals
    respective DataFrame/Series, while leaving the other parameters as they are.
    Similarly, if the output of the function is a Narwhals DataFrame or Series, it will be
    converted back to the original dataframe/series type, while if the output is another
    type it will be left as is.
    By setting `pass_through=False`, then every input and every output will be required to be a
    dataframe/series-like object.

    Arguments:
        func: Function to wrap in a `from_native`-`to_native` block.
        strict: **Deprecated** (v1.13.0):
            Please use `pass_through` instead. Note that `strict` is still available
            (and won't emit a deprecation warning) if you use `narwhals.stable.v1`,
            see [perfect backwards compatibility policy](../backcompat.md/).

            Determine what happens if the object can't be converted to Narwhals:

            - `True` or `None` (default): raise an error
            - `False`: pass object through as-is
        pass_through: Determine what happens if the object can't be converted to Narwhals:

            - `False` or `None` (default): raise an error
            - `True`: pass object through as-is
        eager_only: Whether to only allow eager objects:

            - `False` (default): don't require `native_object` to be eager
            - `True`: only convert to Narwhals if `native_object` is eager
        eager_or_interchange_only: Whether to only allow eager objects or objects which
            have interchange-level support in Narwhals:

            - `False` (default): don't require `native_object` to either be eager or to
              have interchange-level support in Narwhals
            - `True`: only convert to Narwhals if `native_object` is eager or has
              interchange-level support in Narwhals

            See [interchange-only support](../extending.md/#interchange-only-support)
            for more details.
        series_only: Whether to only allow Series:

            - `False` (default): don't require `native_object` to be a Series
            - `True`: only convert to Narwhals if `native_object` is a Series
        allow_series: Whether to allow Series (default is only Dataframe / Lazyframe):

            - `False` or `None`: don't convert to Narwhals if `native_object` is a Series
            - `True` (default): allow `native_object` to be a Series

    Returns:
        Decorated function.

    Examples:
        Instead of writing

        >>> import narwhals as nw
        >>> def agnostic_group_by_sum(df):
        ...     df = nw.from_native(df, pass_through=True)
        ...     df = df.group_by("a").agg(nw.col("b").sum())
        ...     return nw.to_native(df)

        you can just write

        >>> @nw.narwhalify
        ... def agnostic_group_by_sum(df):
        ...     return df.group_by("a").agg(nw.col("b").sum())
    TFr=  c                <     t               d fd       }|S )Nc                    | D cg c]  }t        |
       } }|j                         D ci c]  \  }}|t        |
       }}}g | |j                         D ch c]  }t        |dd       x}r |        }}|j	                         dkD  rd}t        |       | i |}	t        |	      S c c}w c c}}w c c}w )Nr9  __native_namespace__r   z_Found multiple backends. Make sure that all dataframe/series inputs come from the same backend.rK  )r  itemsvaluesgetattr__len__
ValueErrorrE  )argskwargsargr   valuevbbackendsr   rA  r  r  r  funcr+  r  s             r_   wrapperz.narwhalify.<locals>.decorator.<locals>.wrapper  s%     
  C !-).G +!-    
* $*<<>
 $2KD% k!-).G +!-  $2  
 342&--/22A $:DAAAA 2   !A%w o%4*6*FV,??I

s   B=C)C)rc  r   rd  r   r   r   r   )rj  rk  r  r  r  r+  r  s   ` r_   	decoratorznarwhalify.<locals>.decorator  s)    	t%	@ %	@ 
%	@N ra   )rj  Callable[..., Any]r   rm  rQ   )rj  r  r+  r  r  r  r  rl  s     ````` r_   
narwhalifyrn    s>    X 34RWL) )V | ra   c                 <    t        t        j                               S )u  Instantiate an expression representing all columns.

    Returns:
        A new expression.

    Examples:
        >>> import polars as pl
        >>> import pandas as pd
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]}
        >>> df_pd = pd.DataFrame(data)
        >>> df_pl = pl.DataFrame(data)
        >>> df_pa = pa.table(data)

        Let's define a dataframe-agnostic function:

        >>> def agnostic_all(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.all() * 2).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_all`:

        >>> agnostic_all(df_pd)
           a   b
        0  2   8
        1  4  10
        2  6  12

        >>> agnostic_all(df_pl)
        shape: (3, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 2   ┆ 8   │
        │ 4   ┆ 10  │
        │ 6   ┆ 12  │
        └─────┴─────┘

        >>> agnostic_all(df_pa)
        pyarrow.Table
        a: int64
        b: int64
        ----
        a: [[2,4,6]]
        b: [[8,10,12]]
    )r   nwr   rf   ra   r_   r   r   (	  s    j bffhra   c                 8    t        t        j                  |        S )u!  Creates an expression that references one or more columns by their name(s).

    Arguments:
        names: Name(s) of the columns to use.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2], "b": [3, 4]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_col(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.col("a") * nw.col("b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_col`:

        >>> agnostic_col(df_pd)
           a
        0  3
        1  8

        >>> agnostic_col(df_pl)
        shape: (2, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 3   │
        │ 8   │
        └─────┘

        >>> agnostic_col(df_pa)
        pyarrow.Table
        a: int64
        ----
        a: [[3,8]]
    )r   rp  col)namess    r_   rr  rr  `	  s    h bffen%%ra   c                 8    t        t        j                  |        S )u  Creates an expression that references one or more columns by their index(es).

    Notes:
        `nth` is not supported for Polars version<1.0.0. Please use
        [`narwhals.col`][] instead.

    Arguments:
        indices: One or more indices representing the columns to retrieve.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2], "b": [3, 4]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_nth(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.nth(0) * 2).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to `agnostic_nth`:

        >>> agnostic_nth(df_pd)
           a
        0  2
        1  4

        >>> agnostic_nth(df_pl)
        shape: (2, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 2   │
        │ 4   │
        └─────┘

        >>> agnostic_nth(df_pa)
        pyarrow.Table
        a: int64
        ----
        a: [[2,4]]
    )r   rp  nth)indicess    r_   ru  ru  	  s    n bffg&''ra   c                 <    t        t        j                               S )u  Return the number of rows.

    Returns:
        A new expression.

    Examples:
        >>> import polars as pl
        >>> import pandas as pd
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2], "b": [5, 10]}
        >>> df_pd = pd.DataFrame(data)
        >>> df_pl = pl.DataFrame(data)
        >>> df_pa = pa.table(data)

        Let's define a dataframe-agnostic function:

        >>> def agnostic_len(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.len()).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_len`:

        >>> agnostic_len(df_pd)
           len
        0    2
        >>> agnostic_len(df_pl)
        shape: (1, 1)
        ┌─────┐
        │ len │
        │ --- │
        │ u32 │
        ╞═════╡
        │ 2   │
        └─────┘
        >>> agnostic_len(df_pa)
        pyarrow.Table
        len: int64
        ----
        len: [[2]]
    )r   rp  lenrf   ra   r_   rx  rx  	  s    Z bffhra   c                @    t        t        j                  | |            S )u@  Return an expression representing a literal value.

    Arguments:
        value: The value to use as literal.
        dtype: The data type of the literal value. If not provided, the data type will
            be inferred.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_lit(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.with_columns(nw.lit(3)).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_lit`:

        >>> agnostic_lit(df_pd)
           a  literal
        0  1        3
        1  2        3

        >>> agnostic_lit(df_pl)
        shape: (2, 2)
        ┌─────┬─────────┐
        │ a   ┆ literal │
        │ --- ┆ ---     │
        │ i64 ┆ i32     │
        ╞═════╪═════════╡
        │ 1   ┆ 3       │
        │ 2   ┆ 3       │
        └─────┴─────────┘

        >>> agnostic_lit(df_pa)
        pyarrow.Table
        a: int64
        literal: int64
        ----
        a: [[1,2]]
        literal: [[3,3]]
    )r   rp  lit)rf  dtypes     r_   rz  rz  
  s    p bffUE*++ra   c                 8    t        t        j                  |        S )u!  Return the minimum value.

    Note:
       Syntactic sugar for ``nw.col(columns).min()``.

    Arguments:
        columns: Name(s) of the columns to use in the aggregation function.

    Returns:
        A new expression.

    Examples:
        >>> import polars as pl
        >>> import pandas as pd
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2], "b": [5, 10]}
        >>> df_pd = pd.DataFrame(data)
        >>> df_pl = pl.DataFrame(data)
        >>> df_pa = pa.table(data)

        Let's define a dataframe-agnostic function:

        >>> def agnostic_min(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.min("b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_min`:

        >>> agnostic_min(df_pd)
           b
        0  5

        >>> agnostic_min(df_pl)
        shape: (1, 1)
        ┌─────┐
        │ b   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 5   │
        └─────┘

        >>> agnostic_min(df_pa)
        pyarrow.Table
        b: int64
        ----
        b: [[5]]
    )r   rp  mincolumnss    r_   r}  r}  <
      j bffg&''ra   c                 8    t        t        j                  |        S )u!  Return the maximum value.

    Note:
       Syntactic sugar for ``nw.col(columns).max()``.

    Arguments:
        columns: Name(s) of the columns to use in the aggregation function.

    Returns:
        A new expression.

    Examples:
        >>> import polars as pl
        >>> import pandas as pd
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2], "b": [5, 10]}
        >>> df_pd = pd.DataFrame(data)
        >>> df_pl = pl.DataFrame(data)
        >>> df_pa = pa.table(data)

        Let's define a dataframe-agnostic function:

        >>> def agnostic_max(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.max("a")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_max`:

        >>> agnostic_max(df_pd)
           a
        0  2

        >>> agnostic_max(df_pl)
        shape: (1, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 2   │
        └─────┘

        >>> agnostic_max(df_pa)
        pyarrow.Table
        a: int64
        ----
        a: [[2]]
    )r   rp  maxr~  s    r_   r  r  t
  r  ra   c                 8    t        t        j                  |        S )u  Get the mean value.

    Note:
        Syntactic sugar for ``nw.col(columns).mean()``

    Arguments:
        columns: Name(s) of the columns to use in the aggregation function

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 8, 3]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe agnostic function:

        >>> def agnostic_mean(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.mean("a")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_mean`:

        >>> agnostic_mean(df_pd)
             a
        0  4.0

        >>> agnostic_mean(df_pl)
        shape: (1, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ f64 │
        ╞═════╡
        │ 4.0 │
        └─────┘

        >>> agnostic_mean(df_pa)
        pyarrow.Table
        a: double
        ----
        a: [[4]]
    )r   rp  meanr~  s    r_   r  r  
  s    j bggw'((ra   c                 8    t        t        j                  |        S )u  Get the median value.

    Notes:
        - Syntactic sugar for ``nw.col(columns).median()``
        - Results might slightly differ across backends due to differences in the
            underlying algorithms used to compute the median.

    Arguments:
        columns: Name(s) of the columns to use in the aggregation function

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [4, 5, 2]}
        >>> df_pd = pd.DataFrame(data)
        >>> df_pl = pl.DataFrame(data)
        >>> df_pa = pa.table(data)

        Let's define a dataframe agnostic function:

        >>> def agnostic_median(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.median("a")).to_native()

        We can then pass any supported library such as pandas, Polars, or
        PyArrow to `agnostic_median`:

        >>> agnostic_median(df_pd)
             a
        0  4.0

        >>> agnostic_median(df_pl)
        shape: (1, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ f64 │
        ╞═════╡
        │ 4.0 │
        └─────┘

        >>> agnostic_median(df_pa)
        pyarrow.Table
        a: double
        ----
        a: [[4]]
    )r   rp  medianr~  s    r_   r  r  
  s    n bii)**ra   c                 8    t        t        j                  |        S )u  Sum all values.

    Note:
        Syntactic sugar for ``nw.col(columns).sum()``

    Arguments:
        columns: Name(s) of the columns to use in the aggregation function

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_sum(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.sum("a")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_sum`:

        >>> agnostic_sum(df_pd)
           a
        0  3

        >>> agnostic_sum(df_pl)
        shape: (1, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 3   │
        └─────┘

        >>> agnostic_sum(df_pa)
        pyarrow.Table
        a: int64
        ----
        a: [[3]]
    )r   rp  sumr~  s    r_   r  r    r  ra   c                 8    t        t        j                  |        S )u  Sum all values horizontally across columns.

    Warning:
        Unlike Polars, we support horizontal sum over numeric columns only.

    Arguments:
        exprs: Name(s) of the columns to use in the aggregation function. Accepts
            expression input.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2, 3], "b": [5, 10, None]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_sum_horizontal(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.sum_horizontal("a", "b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to `agnostic_sum_horizontal`:

        >>> agnostic_sum_horizontal(df_pd)
              a
        0   6.0
        1  12.0
        2   3.0

        >>> agnostic_sum_horizontal(df_pl)
        shape: (3, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 6   │
        │ 12  │
        │ 3   │
        └─────┘

        >>> agnostic_sum_horizontal(df_pa)
        pyarrow.Table
        a: int64
        ----
        a: [[6,12,3]]
    )r   rp  sum_horizontalexprss    r_   r  r  V  s    r b''/00ra   c                 8    t        t        j                  |        S )u2	  Compute the bitwise AND horizontally across columns.

    Arguments:
        exprs: Name(s) of the columns to use in the aggregation function. Accepts
            expression input.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {
        ...     "a": [False, False, True, True, False, None],
        ...     "b": [False, True, True, None, None, None],
        ... }
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data).convert_dtypes(dtype_backend="pyarrow")
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_all_horizontal(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select("a", "b", all=nw.all_horizontal("a", "b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_all_horizontal`:

        >>> agnostic_all_horizontal(df_pd)
               a      b    all
        0  False  False  False
        1  False   True  False
        2   True   True   True
        3   True   <NA>   <NA>
        4  False   <NA>  False
        5   <NA>   <NA>   <NA>

        >>> agnostic_all_horizontal(df_pl)
        shape: (6, 3)
        ┌───────┬───────┬───────┐
        │ a     ┆ b     ┆ all   │
        │ ---   ┆ ---   ┆ ---   │
        │ bool  ┆ bool  ┆ bool  │
        ╞═══════╪═══════╪═══════╡
        │ false ┆ false ┆ false │
        │ false ┆ true  ┆ false │
        │ true  ┆ true  ┆ true  │
        │ true  ┆ null  ┆ null  │
        │ false ┆ null  ┆ false │
        │ null  ┆ null  ┆ null  │
        └───────┴───────┴───────┘

        >>> agnostic_all_horizontal(df_pa)
        pyarrow.Table
        a: bool
        b: bool
        all: bool
        ----
        a: [[false,false,true,true,false,null]]
        b: [[false,true,true,null,null,null]]
        all: [[false,false,true,null,false,null]]
    )r   rp  all_horizontalr  s    r_   r  r        H b''/00ra   c                 8    t        t        j                  |        S )u/	  Compute the bitwise OR horizontally across columns.

    Arguments:
        exprs: Name(s) of the columns to use in the aggregation function. Accepts
            expression input.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {
        ...     "a": [False, False, True, True, False, None],
        ...     "b": [False, True, True, None, None, None],
        ... }
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data).convert_dtypes(dtype_backend="pyarrow")
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_any_horizontal(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select("a", "b", any=nw.any_horizontal("a", "b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_any_horizontal`:

        >>> agnostic_any_horizontal(df_pd)
               a      b    any
        0  False  False  False
        1  False   True   True
        2   True   True   True
        3   True   <NA>   True
        4  False   <NA>   <NA>
        5   <NA>   <NA>   <NA>

        >>> agnostic_any_horizontal(df_pl)
        shape: (6, 3)
        ┌───────┬───────┬───────┐
        │ a     ┆ b     ┆ any   │
        │ ---   ┆ ---   ┆ ---   │
        │ bool  ┆ bool  ┆ bool  │
        ╞═══════╪═══════╪═══════╡
        │ false ┆ false ┆ false │
        │ false ┆ true  ┆ true  │
        │ true  ┆ true  ┆ true  │
        │ true  ┆ null  ┆ true  │
        │ false ┆ null  ┆ null  │
        │ null  ┆ null  ┆ null  │
        └───────┴───────┴───────┘

        >>> agnostic_any_horizontal(df_pa)
        pyarrow.Table
        a: bool
        b: bool
        any: bool
        ----
        a: [[false,false,true,true,false,null]]
        b: [[false,true,true,null,null,null]]
        any: [[false,true,true,true,null,null]]
    )r   rp  any_horizontalr  s    r_   r  r    r  ra   c                 8    t        t        j                  |        S )ua  Compute the mean of all values horizontally across columns.

    Arguments:
        exprs: Name(s) of the columns to use in the aggregation function. Accepts
            expression input.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {
        ...     "a": [1, 8, 3],
        ...     "b": [4, 5, None],
        ...     "c": ["x", "y", "z"],
        ... }
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function that computes the horizontal mean of "a"
        and "b" columns:

        >>> def agnostic_mean_horizontal(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.mean_horizontal("a", "b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_mean_horizontal`:

        >>> agnostic_mean_horizontal(df_pd)
             a
        0  2.5
        1  6.5
        2  3.0

        >>> agnostic_mean_horizontal(df_pl)
        shape: (3, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ f64 │
        ╞═════╡
        │ 2.5 │
        │ 6.5 │
        │ 3.0 │
        └─────┘

        >>> agnostic_mean_horizontal(df_pa)
        pyarrow.Table
        a: double
        ----
        a: [[2.5,6.5,3]]
    )r   rp  mean_horizontalr  s    r_   r  r     s    x b((%011ra   c                 8    t        t        j                  |        S )uI  Get the minimum value horizontally across columns.

    Notes:
        We support `min_horizontal` over numeric columns only.

    Arguments:
        exprs: Name(s) of the columns to use in the aggregation function. Accepts
            expression input.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {
        ...     "a": [1, 8, 3],
        ...     "b": [4, 5, None],
        ...     "c": ["x", "y", "z"],
        ... }

        We define a dataframe-agnostic function that computes the horizontal min of "a"
        and "b" columns:

        >>> def agnostic_min_horizontal(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.min_horizontal("a", "b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_min_horizontal`:

        >>> agnostic_min_horizontal(pd.DataFrame(data))
             a
        0  1.0
        1  5.0
        2  3.0

        >>> agnostic_min_horizontal(pl.DataFrame(data))
        shape: (3, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 1   │
        │ 5   │
        │ 3   │
        └─────┘

        >>> agnostic_min_horizontal(pa.table(data))
        pyarrow.Table
        a: int64
        ----
        a: [[1,5,3]]
    )r   rp  min_horizontalr  s    r_   r  r  _      x b''/00ra   c                 8    t        t        j                  |        S )uI  Get the maximum value horizontally across columns.

    Notes:
        We support `max_horizontal` over numeric columns only.

    Arguments:
        exprs: Name(s) of the columns to use in the aggregation function. Accepts
            expression input.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {
        ...     "a": [1, 8, 3],
        ...     "b": [4, 5, None],
        ...     "c": ["x", "y", "z"],
        ... }

        We define a dataframe-agnostic function that computes the horizontal max of "a"
        and "b" columns:

        >>> def agnostic_max_horizontal(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(nw.max_horizontal("a", "b")).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_max_horizontal`:

        >>> agnostic_max_horizontal(pd.DataFrame(data))
             a
        0  4.0
        1  8.0
        2  3.0

        >>> agnostic_max_horizontal(pl.DataFrame(data))
        shape: (3, 1)
        ┌─────┐
        │ a   │
        │ --- │
        │ i64 │
        ╞═════╡
        │ 4   │
        │ 8   │
        │ 3   │
        └─────┘

        >>> agnostic_max_horizontal(pa.table(data))
        pyarrow.Table
        a: int64
        ----
        a: [[4,8,3]]
    )r   rp  max_horizontalr  s    r_   r  r    r  ra   verticalhowc                    y r\   rf   r^  r  s     r_   concatr        
 ra   c                    y r\   rf   r  s     r_   r  r    r  ra   c               B    t        t        j                  | |            S )u^  Concatenate multiple DataFrames, LazyFrames into a single entity.

    Arguments:
        items: DataFrames, LazyFrames to concatenate.
        how: concatenating strategy:

            - vertical: Concatenate vertically. Column names must match.
            - horizontal: Concatenate horizontally. If lengths don't match, then
                missing rows are filled with null values.
            - diagonal: Finds a union between the column schemas and fills missing column
                values with null.

    Returns:
        A new DataFrame, Lazyframe resulting from the concatenation.

    Raises:
        TypeError: The items to concatenate should either all be eager, or all lazy

    Examples:
        Let's take an example of vertical concatenation:

        >>> import pandas as pd
        >>> import polars as pl
        >>> import narwhals as nw
        >>> data_1 = {"a": [1, 2, 3], "b": [4, 5, 6]}
        >>> data_2 = {"a": [5, 2], "b": [1, 4]}

        >>> df_pd_1 = pd.DataFrame(data_1)
        >>> df_pd_2 = pd.DataFrame(data_2)
        >>> df_pl_1 = pl.DataFrame(data_1)
        >>> df_pl_2 = pl.DataFrame(data_2)

        Let's define a dataframe-agnostic function:

        >>> @nw.narwhalify
        ... def agnostic_vertical_concat(df1, df2):
        ...     return nw.concat([df1, df2], how="vertical")

        >>> agnostic_vertical_concat(df_pd_1, df_pd_2)
           a  b
        0  1  4
        1  2  5
        2  3  6
        0  5  1
        1  2  4
        >>> agnostic_vertical_concat(df_pl_1, df_pl_2)
        shape: (5, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 1   ┆ 4   │
        │ 2   ┆ 5   │
        │ 3   ┆ 6   │
        │ 5   ┆ 1   │
        │ 2   ┆ 4   │
        └─────┴─────┘

        Let's look at the case of horizontal concatenation:

        >>> import pandas as pd
        >>> import polars as pl
        >>> import narwhals as nw
        >>> data_1 = {"a": [1, 2, 3], "b": [4, 5, 6]}
        >>> data_2 = {"c": [5, 2], "d": [1, 4]}

        >>> df_pd_1 = pd.DataFrame(data_1)
        >>> df_pd_2 = pd.DataFrame(data_2)
        >>> df_pl_1 = pl.DataFrame(data_1)
        >>> df_pl_2 = pl.DataFrame(data_2)

        Defining a dataframe-agnostic function:

        >>> @nw.narwhalify
        ... def agnostic_horizontal_concat(df1, df2):
        ...     return nw.concat([df1, df2], how="horizontal")

        >>> agnostic_horizontal_concat(df_pd_1, df_pd_2)
           a  b    c    d
        0  1  4  5.0  1.0
        1  2  5  2.0  4.0
        2  3  6  NaN  NaN

        >>> agnostic_horizontal_concat(df_pl_1, df_pl_2)
        shape: (3, 4)
        ┌─────┬─────┬──────┬──────┐
        │ a   ┆ b   ┆ c    ┆ d    │
        │ --- ┆ --- ┆ ---  ┆ ---  │
        │ i64 ┆ i64 ┆ i64  ┆ i64  │
        ╞═════╪═════╪══════╪══════╡
        │ 1   ┆ 4   ┆ 5    ┆ 1    │
        │ 2   ┆ 5   ┆ 2    ┆ 4    │
        │ 3   ┆ 6   ┆ null ┆ null │
        └─────┴─────┴──────┴──────┘

        Let's look at the case of diagonal concatenation:

        >>> import pandas as pd
        >>> import polars as pl
        >>> import narwhals as nw
        >>> data_1 = {"a": [1, 2], "b": [3.5, 4.5]}
        >>> data_2 = {"a": [3, 4], "z": ["x", "y"]}

        >>> df_pd_1 = pd.DataFrame(data_1)
        >>> df_pd_2 = pd.DataFrame(data_2)
        >>> df_pl_1 = pl.DataFrame(data_1)
        >>> df_pl_2 = pl.DataFrame(data_2)

        Defining a dataframe-agnostic function:

        >>> @nw.narwhalify
        ... def agnostic_diagonal_concat(df1, df2):
        ...     return nw.concat([df1, df2], how="diagonal")

        >>> agnostic_diagonal_concat(df_pd_1, df_pd_2)
           a    b    z
        0  1  3.5  NaN
        1  2  4.5  NaN
        0  3  NaN    x
        1  4  NaN    y

        >>> agnostic_diagonal_concat(df_pl_1, df_pl_2)
        shape: (4, 3)
        ┌─────┬──────┬──────┐
        │ a   ┆ b    ┆ z    │
        │ --- ┆ ---  ┆ ---  │
        │ i64 ┆ f64  ┆ str  │
        ╞═════╪══════╪══════╡
        │ 1   ┆ 3.5  ┆ null │
        │ 2   ┆ 4.5  ┆ null │
        │ 3   ┆ null ┆ x    │
        │ 4   ┆ null ┆ y    │
        └─────┴──────┴──────┘
    r  )r   rp  r  r  s     r_   r  r    s    X bii3/00ra    	separatorr   c               H    t        t        j                  | g|||d      S )uI
  Horizontally concatenate columns into a single string column.

    Arguments:
        exprs: Columns to concatenate into a single string column. Accepts expression
            input. Strings are parsed as column names, other non-expression inputs are
            parsed as literals. Non-`String` columns are cast to `String`.
        *more_exprs: Additional columns to concatenate into a single string column,
            specified as positional arguments.
        separator: String that will be used to separate the values of each column.
        ignore_nulls: Ignore null values (default is `False`).
            If set to `False`, null values will be propagated and if the row contains any
            null values, the output is null.

    Returns:
        A new expression.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {
        ...     "a": [1, 2, 3],
        ...     "b": ["dogs", "cats", None],
        ...     "c": ["play", "swim", "walk"],
        ... }

        We define a dataframe-agnostic function that computes the horizontal string
        concatenation of different columns

        >>> def agnostic_concat_str(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.select(
        ...         nw.concat_str(
        ...             [
        ...                 nw.col("a") * 2,
        ...                 nw.col("b"),
        ...                 nw.col("c"),
        ...             ],
        ...             separator=" ",
        ...         ).alias("full_sentence")
        ...     ).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow
        to `agnostic_concat_str`:

        >>> agnostic_concat_str(pd.DataFrame(data))
          full_sentence
        0   2 dogs play
        1   4 cats swim
        2          None

        >>> agnostic_concat_str(pl.DataFrame(data))
        shape: (3, 1)
        ┌───────────────┐
        │ full_sentence │
        │ ---           │
        │ str           │
        ╞═══════════════╡
        │ 2 dogs play   │
        │ 4 cats swim   │
        │ null          │
        └───────────────┘

        >>> agnostic_concat_str(pa.table(data))
        pyarrow.Table
        full_sentence: string
        ----
        full_sentence: [["2 dogs play","4 cats swim",null]]
    r  )r   rp  
concat_str)r  r  r   
more_exprss       r_   r  r  |  s*    \ 
eYjYILY ra   c                  2     e Zd Zedd       Zd fdZ xZS )r   c                      | |j                    S r\   )_predicates)clsr   s     r_   	from_whenzWhen.from_when  s    D$$%%ra   c                H    t         j                  t        |   |            S r\   )r   	from_thenr}   thenr^   rf  r~   s     r_   r  z	When.then  s    ~~egl5122ra   )r   NwWhenr   rT   )rf  r   r   r   )r   r   r   classmethodr  r  r   r   s   @r_   r   r     s    & &3 3ra   r   c                  2     e Zd Zedd       Zd fdZ xZS )r   c                &     | |j                         S r\   )r
  )r  r  s     r_   r  zThen.from_then  s    4**++ra   c                4    t        t        | 	  |            S r\   )r   r}   	otherwiser  s     r_   r  zThen.otherwise  s    %'+E233ra   )r  NwThenr   rT   )rf  r   r   r   )r   r   r   r  r  r  r   r   s   @r_   r   r     s    , ,4 4ra   r   c                 8    t         j                  t        |        S )u
  Start a `when-then-otherwise` expression.

    Expression similar to an `if-else` statement in Python. Always initiated by a
    `nw.when(<condition>).then(<value if condition>)`, and optionally followed by
    chaining one or more `.when(<condition>).then(<value>)` statements.
    Chained when-then operations should be read as Python `if, elif, ... elif`
    blocks, not as `if, if, ... if`, i.e. the first condition that evaluates to
    `True` will be picked.
    If none of the conditions are `True`, an optional
    `.otherwise(<value if all statements are false>)` can be appended at the end.
    If not appended, and none of the conditions are `True`, `None` will be returned.

    Arguments:
        predicates: Condition(s) that must be met in order to apply the subsequent
            statement. Accepts one or more boolean expressions, which are implicitly
            combined with `&`. String input is parsed as a column name.

    Returns:
        A "when" object, which `.then` can be called on.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>>
        >>> data = {"a": [1, 2, 3], "b": [5, 10, 15]}
        >>> df_pl = pl.DataFrame(data)
        >>> df_pd = pd.DataFrame(data)
        >>> df_pa = pa.table(data)

        We define a dataframe-agnostic function:

        >>> def agnostic_when_then_otherwise(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return df.with_columns(
        ...         nw.when(nw.col("a") < 3).then(5).otherwise(6).alias("a_when")
        ...     ).to_native()

        We can pass any supported library such as Pandas, Polars, or PyArrow to
        `agnostic_when_then_otherwise`:

        >>> agnostic_when_then_otherwise(df_pd)
           a   b  a_when
        0  1   5       5
        1  2  10       5
        2  3  15       6

        >>> agnostic_when_then_otherwise(df_pl)
        shape: (3, 3)
        ┌─────┬─────┬────────┐
        │ a   ┆ b   ┆ a_when │
        │ --- ┆ --- ┆ ---    │
        │ i64 ┆ i64 ┆ i32    │
        ╞═════╪═════╪════════╡
        │ 1   ┆ 5   ┆ 5      │
        │ 2   ┆ 10  ┆ 5      │
        │ 3   ┆ 15  ┆ 6      │
        └─────┴─────┴────────┘

        >>> agnostic_when_then_otherwise(df_pa)
        pyarrow.Table
        a: int64
        b: int64
        a_when: int64
        ----
        a: [[1,2,3]]
        b: [[5,10,15]]
        a_when: [[5,5,6]]
    )r   r  nw_when)
predicatess    r_   r   r     s    P >>':.//ra   c          	     P    t        t        | |||t        j                              S )a  Instantiate Narwhals Series from iterable (e.g. list or array).

    Arguments:
        name: Name of resulting Series.
        values: Values to make the Series from.
        dtype: (Narwhals) dtype. If not provided, the native library
            may auto-infer it from `values`.
        native_namespace: The native library to use for Series creation.

    Returns:
        A new Series

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT, IntoSeriesT
        >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]}

        Let's define a dataframe-agnostic function:

        >>> def agnostic_new_series(df_native: IntoFrameT) -> IntoSeriesT:
        ...     values = [4, 1, 2, 3]
        ...     native_namespace = nw.get_native_namespace(df_native)
        ...     return nw.new_series(
        ...         name="a",
        ...         values=values,
        ...         dtype=nw.Int32,
        ...         native_namespace=native_namespace,
        ...     ).to_native()

        We can then pass any supported eager library, such as pandas / Polars / PyArrow:

        >>> agnostic_new_series(pd.DataFrame(data))
        0    4
        1    1
        2    2
        3    3
        Name: a, dtype: int32
        >>> agnostic_new_series(pl.DataFrame(data))  # doctest: +NORMALIZE_WHITESPACE
        shape: (4,)
        Series: 'a' [i32]
        [
           4
           1
           2
           3
        ]
        >>> agnostic_new_series(pa.table(data))
        <pyarrow.lib.ChunkedArray object at ...>
        [
          [
            4,
            1,
            2,
            3
          ]
        ]
    native_namespacer@  )r   r   rI   r  )r   r_  r{  r  s       r_   
new_seriesr  ,  s-    F -JJ	
 ra   c               .    t        t        | |            S )a  Construct a DataFrame from an object which supports the PyCapsule Interface.

    Arguments:
        native_frame: Object which implements `__arrow_c_stream__`.
        native_namespace: The native library to use for DataFrame creation.

    Returns:
        A new DataFrame.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]}

        Let's define a dataframe-agnostic function which creates a PyArrow
        Table.

        >>> def agnostic_to_arrow(df_native: IntoFrameT) -> IntoFrameT:
        ...     df = nw.from_native(df_native)
        ...     return nw.from_arrow(df, native_namespace=pa).to_native()

        Let's see what happens when passing pandas / Polars input:

        >>> agnostic_to_arrow(pd.DataFrame(data))
        pyarrow.Table
        a: int64
        b: int64
        ----
        a: [[1,2,3]]
        b: [[4,5,6]]
        >>> agnostic_to_arrow(pl.DataFrame(data))
        pyarrow.Table
        a: int64
        b: int64
        ----
        a: [[1,2,3]]
        b: [[4,5,6]]
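
        A PyArrow Table itself implements `__arrow_c_stream__`, so it can also be
        passed directly (a minimal sketch; output omitted):

        >>> nw.from_arrow(pa.table(data), native_namespace=pa).to_native()  # doctest: +SKIP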
    """
    return _stableify(
        nw_from_arrow(native_frame, native_namespace=native_namespace)
    )


def from_dict(
    data: dict[str, Any],
    schema: dict[str, DType] | Schema | None = None,
    *,
    native_namespace: ModuleType | None = None,
) -> DataFrame[Any]:
    """Instantiate DataFrame from dictionary.

    Indexes (if present, for pandas-like backends) are aligned following
    the [left-hand-rule](../pandas_like_concepts/pandas_index.md/).

    Notes:
        For pandas-like dataframes, conversion to schema is applied after dataframe
        creation.

    Arguments:
        data: Dictionary to create DataFrame from.
        schema: The DataFrame schema as Schema or dict of {name: type}.
        native_namespace: The native library to use for DataFrame creation. Only
            necessary if inputs are not Narwhals Series.

    Returns:
        A new DataFrame.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrameT
        >>> data = {"a": [1, 2, 3], "b": [4, 5, 6]}

        Let's create a new dataframe of the same class as the dataframe we started with, from a dict of new data:

        >>> def agnostic_from_dict(df_native: IntoFrameT) -> IntoFrameT:
        ...     new_data = {"c": [5, 2], "d": [1, 4]}
        ...     native_namespace = nw.get_native_namespace(df_native)
        ...     return nw.from_dict(new_data, native_namespace=native_namespace).to_native()

        Let's see what happens when passing pandas, Polars or PyArrow input:

        >>> agnostic_from_dict(pd.DataFrame(data))
           c  d
        0  5  1
        1  2  4
        >>> agnostic_from_dict(pl.DataFrame(data))
        shape: (2, 2)
        ┌─────┬─────┐
        │ c   ┆ d   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 5   ┆ 1   │
        │ 2   ┆ 4   │
        └─────┴─────┘
        >>> agnostic_from_dict(pa.table(data))
        pyarrow.Table
        c: int64
        d: int64
        ----
        c: [[5,2]]
        d: [[1,4]]
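
        A schema can also be passed explicitly to control the dtypes of the new
        columns; a minimal sketch (output omitted):

        >>> def agnostic_from_dict_schema(df_native: IntoFrameT) -> IntoFrameT:
        ...     new_data = {"c": [5, 2], "d": [1, 4]}
        ...     schema = {"c": nw.Int16(), "d": nw.Float32()}
        ...     native_namespace = nw.get_native_namespace(df_native)
        ...     return nw.from_dict(
        ...         new_data, schema=schema, native_namespace=native_namespace
        ...     ).to_native()
        >>> agnostic_from_dict_schema(pd.DataFrame(data))  # doctest: +SKIP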
    r  )r   r   rI   r  dataschemar  s      r_   	from_dictr    s*    ~ -JJ		
 ra   c               N    t        t        | ||t        j                              S )uu  Construct a DataFrame from a NumPy ndarray.

    Notes:
        Only row orientation is currently supported.

        For pandas-like dataframes, conversion to schema is applied after dataframe
        creation.

    Arguments:
        data: Two-dimensional data represented as a NumPy ndarray.
        schema: The DataFrame schema as Schema, dict of {name: type}, or a list of str.
        native_namespace: The native library to use for DataFrame creation.

    Returns:
        A new DataFrame.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> import numpy as np
        >>> from narwhals.typing import IntoFrameT
        >>> data = {"a": [1, 2], "b": [3, 4]}

        Let's create a new dataframe of the same class as the dataframe we started with, from a NumPy ndarray of new data:

        >>> def agnostic_from_numpy(df_native: IntoFrameT) -> IntoFrameT:
        ...     new_data = np.array([[5, 2, 1], [1, 4, 3]])
        ...     df = nw.from_native(df_native)
        ...     native_namespace = nw.get_native_namespace(df)
        ...     return nw.from_numpy(new_data, native_namespace=native_namespace).to_native()

        Let's see what happens when passing pandas, Polars or PyArrow input:

        >>> agnostic_from_numpy(pd.DataFrame(data))
           column_0  column_1  column_2
        0         5         2         1
        1         1         4         3
        >>> agnostic_from_numpy(pl.DataFrame(data))
        shape: (2, 3)
        ┌──────────┬──────────┬──────────┐
        │ column_0 ┆ column_1 ┆ column_2 │
        │ ---      ┆ ---      ┆ ---      │
        │ i64      ┆ i64      ┆ i64      │
        ╞══════════╪══════════╪══════════╡
        │ 5        ┆ 2        ┆ 1        │
        │ 1        ┆ 4        ┆ 3        │
        └──────────┴──────────┴──────────┘
        >>> agnostic_from_numpy(pa.table(data))
        pyarrow.Table
        column_0: int64
        column_1: int64
        column_2: int64
        ----
        column_0: [[5,1]]
        column_1: [[2,4]]
        column_2: [[1,3]]

        Let's specify the column names:

        >>> def agnostic_from_numpy(df_native: IntoFrameT) -> IntoFrameT:
        ...     new_data = np.array([[5, 2, 1], [1, 4, 3]])
        ...     schema = ["c", "d", "e"]
        ...     df = nw.from_native(df_native)
        ...     native_namespace = nw.get_native_namespace(df)
        ...     return nw.from_numpy(
        ...         new_data, native_namespace=native_namespace, schema=schema
        ...     ).to_native()

        Let's see the modified outputs:

        >>> agnostic_from_numpy(pd.DataFrame(data))
           c  d  e
        0  5  2  1
        1  1  4  3
        >>> agnostic_from_numpy(pl.DataFrame(data))
        shape: (2, 3)
        ┌─────┬─────┬─────┐
        │ c   ┆ d   ┆ e   │
        │ --- ┆ --- ┆ --- │
        │ i64 ┆ i64 ┆ i64 │
        ╞═════╪═════╪═════╡
        │ 5   ┆ 2   ┆ 1   │
        │ 1   ┆ 4   ┆ 3   │
        └─────┴─────┴─────┘
        >>> agnostic_from_numpy(pa.table(data))
        pyarrow.Table
        c: int64
        d: int64
        e: int64
        ----
        c: [[5,1]]
        d: [[2,4]]
        e: [[1,3]]

        Let's modify the function so that it specifies the schema:

        >>> def agnostic_from_numpy(df_native: IntoFrameT) -> IntoFrameT:
        ...     new_data = np.array([[5, 2, 1], [1, 4, 3]])
        ...     schema = {"c": nw.Int16(), "d": nw.Float32(), "e": nw.Int8()}
        ...     df = nw.from_native(df_native)
        ...     native_namespace = nw.get_native_namespace(df)
        ...     return nw.from_numpy(
        ...         new_data, native_namespace=native_namespace, schema=schema
        ...     ).to_native()

        Let's see the outputs:

        >>> agnostic_from_numpy(pd.DataFrame(data))
           c    d  e
        0  5  2.0  1
        1  1  4.0  3
        >>> agnostic_from_numpy(pl.DataFrame(data))
        shape: (2, 3)
        ┌─────┬─────┬─────┐
        │ c   ┆ d   ┆ e   │
        │ --- ┆ --- ┆ --- │
        │ i16 ┆ f32 ┆ i8  │
        ╞═════╪═════╪═════╡
        │ 5   ┆ 2.0 ┆ 1   │
        │ 1   ┆ 4.0 ┆ 3   │
        └─────┴─────┴─────┘
        >>> agnostic_from_numpy(pa.table(data))
        pyarrow.Table
        c: int16
        d: float
        e: int8
        ----
        c: [[5,1]]
        d: [[2,4]]
        e: [[1,3]]
    r  )r   r   rI   r  r  s      r_   
from_numpyr    s*    V -JJ		
 ra   c               0    t        t        | fd|i|      S )u  Read a CSV file into a DataFrame.

    Arguments:
        source: Path to a file.
        native_namespace: The native library to use for DataFrame creation.
        kwargs: Extra keyword arguments which are passed to the native CSV reader.
            For example, you could use
            `nw.read_csv('file.csv', native_namespace=pd, engine='pyarrow')`.

    Returns:
        DataFrame.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoDataFrame
        >>> from types import ModuleType

        Let's create an agnostic function that reads a csv file with a specified native namespace:

        >>> def agnostic_read_csv(native_namespace: ModuleType) -> IntoDataFrame:
        ...     return nw.read_csv("file.csv", native_namespace=native_namespace).to_native()

        Then we can read the file by passing pandas, Polars or PyArrow namespaces:

        >>> agnostic_read_csv(native_namespace=pd)  # doctest:+SKIP
           a  b
        0  1  4
        1  2  5
        2  3  6
        >>> agnostic_read_csv(native_namespace=pl)  # doctest:+SKIP
        shape: (3, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 1   ┆ 4   │
        │ 2   ┆ 5   │
        │ 3   ┆ 6   │
        └─────┴─────┘
        >>> agnostic_read_csv(native_namespace=pa)  # doctest:+SKIP
        pyarrow.Table
        a: int64
        b: int64
        ----
        a: [[1,2,3]]
        b: [[4,5,6]]
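
        Because extra keyword arguments are forwarded to the native reader,
        backend-specific options can be used too; for instance, with pandas one
        might pass `usecols` (a minimal sketch; output omitted):

        >>> def agnostic_read_csv_subset(native_namespace: ModuleType) -> IntoDataFrame:
        ...     return nw.read_csv(
        ...         "file.csv", native_namespace=native_namespace, usecols=["a"]
        ...     ).to_native()
        >>> agnostic_read_csv_subset(native_namespace=pd)  # doctest: +SKIP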
    """
    return _stableify(
        nw_read_csv(source, native_namespace=native_namespace, **kwargs)
    )


def scan_csv(
    source: str, *, native_namespace: ModuleType, **kwargs: Any
) -> LazyFrame[Any]:
    """Lazily read from a CSV file.

    For the libraries that do not support lazy dataframes, the function reads
    a csv file eagerly and then converts the resulting dataframe to a lazyframe.

    Arguments:
        source: Path to a file.
        native_namespace: The native library to use for DataFrame creation.
        kwargs: Extra keyword arguments which are passed to the native CSV reader.
            For example, you could use
            `nw.scan_csv('file.csv', native_namespace=pd, engine='pyarrow')`.

    Returns:
        LazyFrame.

    Examples:
        >>> import dask.dataframe as dd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrame
        >>> from types import ModuleType

        Let's create an agnostic function that lazily reads a csv file with a specified native namespace:

        >>> def agnostic_scan_csv(native_namespace: ModuleType) -> IntoFrame:
        ...     return nw.scan_csv("file.csv", native_namespace=native_namespace).to_native()

        Then we can read the file by passing, for example, Polars or Dask namespaces:

        >>> agnostic_scan_csv(native_namespace=pl).collect()  # doctest:+SKIP
        shape: (3, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 1   ┆ 4   │
        │ 2   ┆ 5   │
        │ 3   ┆ 6   │
        └─────┴─────┘
        >>> agnostic_scan_csv(native_namespace=dd).compute()  # doctest:+SKIP
           a  b
        0  1  4
        1  2  5
        2  3  6
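
        Eager-only backends are read eagerly and the result is wrapped in a
        `LazyFrame`; a minimal sketch with the PyArrow namespace (output omitted):

        >>> nw.scan_csv("file.csv", native_namespace=pa).collect().to_native()  # doctest: +SKIP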
    """
    return _stableify(
        nw_scan_csv(source, native_namespace=native_namespace, **kwargs)
    )


def read_parquet(
    source: str, *, native_namespace: ModuleType, **kwargs: Any
) -> DataFrame[Any]:
    """Read into a DataFrame from a parquet file.

    Arguments:
        source: Path to a file.
        native_namespace: The native library to use for DataFrame creation.
        kwargs: Extra keyword arguments which are passed to the native parquet reader.
            For example, you could use
            `nw.read_parquet('file.parquet', native_namespace=pd, engine='pyarrow')`.

    Returns:
        DataFrame.

    Examples:
        >>> import pandas as pd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoDataFrame
        >>> from types import ModuleType

        Let's create an agnostic function that reads a parquet file with a specified native namespace:

        >>> def agnostic_read_parquet(native_namespace: ModuleType) -> IntoDataFrame:
        ...     return nw.read_parquet(
        ...         "file.parquet", native_namespace=native_namespace
        ...     ).to_native()

        Then we can read the file by passing pandas, Polars or PyArrow namespaces:

        >>> agnostic_read_parquet(native_namespace=pd)  # doctest:+SKIP
           a  b
        0  1  4
        1  2  5
        2  3  6
        >>> agnostic_read_parquet(native_namespace=pl)  # doctest:+SKIP
        shape: (3, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 1   ┆ 4   │
        │ 2   ┆ 5   │
        │ 3   ┆ 6   │
        └─────┴─────┘
        >>> agnostic_read_parquet(native_namespace=pa)  # doctest:+SKIP
        pyarrow.Table
        a: int64
        b: int64
        ----
        a: [[1,2,3]]
        b: [[4,5,6]]
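
        As with `read_csv`, extra keyword arguments are passed through to the native
        parquet reader; for instance, with pandas one might select a subset of columns
        (a minimal sketch; output omitted):

        >>> def agnostic_read_parquet_subset(native_namespace: ModuleType) -> IntoDataFrame:
        ...     return nw.read_parquet(
        ...         "file.parquet", native_namespace=native_namespace, columns=["a"]
        ...     ).to_native()
        >>> agnostic_read_parquet_subset(native_namespace=pd)  # doctest: +SKIP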
    """
    return _stableify(
        nw_read_parquet(source, native_namespace=native_namespace, **kwargs)
    )


def scan_parquet(
    source: str, *, native_namespace: ModuleType, **kwargs: Any
) -> LazyFrame[Any]:
    """Lazily read from a parquet file.

    For the libraries that do not support lazy dataframes, the function reads
    a parquet file eagerly and then converts the resulting dataframe to a lazyframe.

    Arguments:
        source: Path to a file.
        native_namespace: The native library to use for DataFrame creation.
        kwargs: Extra keyword arguments which are passed to the native parquet reader.
            For example, you could use
            `nw.scan_parquet('file.parquet', native_namespace=pd, engine='pyarrow')`.

    Returns:
        LazyFrame.

    Examples:
        >>> import dask.dataframe as dd
        >>> import polars as pl
        >>> import pyarrow as pa
        >>> import narwhals as nw
        >>> from narwhals.typing import IntoFrame
        >>> from types import ModuleType

        Let's create an agnostic function that lazily reads a parquet file with a specified native namespace:

        >>> def agnostic_scan_parquet(native_namespace: ModuleType) -> IntoFrame:
        ...     return nw.scan_parquet(
        ...         "file.parquet", native_namespace=native_namespace
        ...     ).to_native()

        Then we can read the file by passing, for example, Polars or Dask namespaces:

        >>> agnostic_scan_parquet(native_namespace=pl).collect()  # doctest:+SKIP
        shape: (3, 2)
        ┌─────┬─────┐
        │ a   ┆ b   │
        │ --- ┆ --- │
        │ i64 ┆ i64 │
        ╞═════╪═════╡
        │ 1   ┆ 4   │
        │ 2   ┆ 5   │
        │ 3   ┆ 6   │
        └─────┴─────┘
        >>> agnostic_scan_parquet(native_namespace=dd).compute()  # doctest:+SKIP
           a  b
        0  1  4
        1  2  5
        2  3  6
    """
    return _stableify(
        nw_scan_parquet(source, native_namespace=native_namespace, **kwargs)
    )