
r"""
Regular-Expression Tokenizers

A ``RegexpTokenizer`` splits a string into substrings using a regular expression.
For example, the following tokenizer forms tokens out of alphabetic sequences,
money expressions, and any other non-whitespace sequences:

    >>> from nltk.tokenize import RegexpTokenizer
    >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
    >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+')
    >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE
    ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
    'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']

A ``RegexpTokenizer`` can use its regexp to match delimiters instead:

    >>> tokenizer = RegexpTokenizer(r'\s+', gaps=True)
    >>> tokenizer.tokenize(s) # doctest: +NORMALIZE_WHITESPACE
    ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
    'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']

Note that empty tokens are not returned when the delimiter appears at
the start or end of the string.
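
For example, splitting on whitespace does not produce empty tokens for
leading or trailing whitespace:

    >>> tokenizer.tokenize("  Good muffins  ")
    ['Good', 'muffins']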

The material between the tokens is discarded.  For example,
the following tokenizer selects just the capitalized words:

    >>> capword_tokenizer = RegexpTokenizer(r'[A-Z]\w+')
    >>> capword_tokenizer.tokenize(s)
    ['Good', 'New', 'York', 'Please', 'Thanks']

This module contains several subclasses of ``RegexpTokenizer``
that use pre-defined regular expressions.

    >>> from nltk.tokenize import BlanklineTokenizer
    >>> # Uses '\s*\n\s*\n\s*':
    >>> BlanklineTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE
    ['Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.',
    'Thanks.']

All of the regular expression tokenizers are also available as functions:

    >>> from nltk.tokenize import regexp_tokenize, wordpunct_tokenize, blankline_tokenize
    >>> regexp_tokenize(s, pattern=r'\w+|\$[\d\.]+|\S+') # doctest: +NORMALIZE_WHITESPACE
    ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York', '.',
    'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
    >>> wordpunct_tokenize(s) # doctest: +NORMALIZE_WHITESPACE
    ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York',
     '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
    >>> blankline_tokenize(s)
    ['Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.', 'Thanks.']

Caution: The function ``regexp_tokenize()`` takes the text as its
first argument, and the regular expression pattern as its second
argument.  This differs from the conventions used by Python's
``re`` functions, where the pattern is always the first argument.
(This is for consistency with the other NLTK tokenizers.)
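
For example, the pattern is given second, after the text:

    >>> regexp_tokenize("A cat", r'\w+')
    ['A', 'cat']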
"""

import re

from nltk.tokenize.api import TokenizerI
from nltk.tokenize.util import regexp_span_tokenize


class RegexpTokenizer(TokenizerI):
    r"""
    A tokenizer that splits a string using a regular expression, which
    matches either the tokens or the separators between tokens.

        >>> tokenizer = RegexpTokenizer(r'\w+|\$[\d\.]+|\S+')
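
    ``span_tokenize()`` yields the ``(start, end)`` offsets of the tokens
    instead of the token strings:

        >>> list(RegexpTokenizer(r'\w+').span_tokenize("A cat"))
        [(0, 1), (2, 5)]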

    :type pattern: str
    :param pattern: The pattern used to build this tokenizer.
        (This pattern must not contain capturing parentheses;
        use non-capturing parentheses, e.g. (?:...), instead)
    :type gaps: bool
    :param gaps: True if this tokenizer's pattern should be used
        to find separators between tokens; False if this
        tokenizer's pattern should be used to find the tokens
        themselves.
    :type discard_empty: bool
    :param discard_empty: True if any empty tokens `''`
        generated by the tokenizer should be discarded.  Empty
        tokens can only be generated if `_gaps == True`.
    :type flags: int
    :param flags: The regexp flags used to compile this
        tokenizer's pattern.  By default, the following flags are
        used: `re.UNICODE | re.MULTILINE | re.DOTALL`.
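
    The pattern may also be given as a pre-compiled pattern object, in which
    case its pattern string is extracted and re-compiled with this
    tokenizer's flags:

        >>> import re
        >>> RegexpTokenizer(re.compile(r'\w+')).tokenize("A cat")
        ['A', 'cat']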

    """

    def __init__(
        self,
        pattern,
        gaps=False,
        discard_empty=True,
        flags=re.UNICODE | re.MULTILINE | re.DOTALL,
    ):
        # If a compiled regexp object was given, use its pattern string.
        pattern = getattr(pattern, "pattern", pattern)

        self._pattern = pattern
        self._gaps = gaps
        self._discard_empty = discard_empty
        self._flags = flags
        self._regexp = None

    def _check_regexp(self):
        # Compile the pattern lazily, the first time it is needed.
        if self._regexp is None:
            self._regexp = re.compile(self._pattern, self._flags)

    def tokenize(self, text):
        self._check_regexp()
        if self._gaps:
            # The pattern matches the gaps between tokens, so split on it.
            if self._discard_empty:
                return [tok for tok in self._regexp.split(text) if tok]
            else:
                return self._regexp.split(text)
        else:
            # The pattern matches the tokens themselves.
            return self._regexp.findall(text)

    def span_tokenize(self, text):
        self._check_regexp()
        if self._gaps:
            for left, right in regexp_span_tokenize(text, self._regexp):
                if not (self._discard_empty and left == right):
                    yield left, right
        else:
            for m in re.finditer(self._regexp, text):
                yield m.span()

    def __repr__(self):
        return "{}(pattern={!r}, gaps={!r}, discard_empty={!r}, flags={!r})".format(
            self.__class__.__name__,
            self._pattern,
            self._gaps,
            self._discard_empty,
            self._flags,
        )


class WhitespaceTokenizer(RegexpTokenizer):
    r"""
    Tokenize a string on whitespace (space, tab, newline).
    In general, users should use the string ``split()`` method instead.

        >>> from nltk.tokenize import WhitespaceTokenizer
        >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
        >>> WhitespaceTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE
        ['Good', 'muffins', 'cost', '$3.88', 'in', 'New', 'York.',
        'Please', 'buy', 'me', 'two', 'of', 'them.', 'Thanks.']
    """

    def __init__(self):
        RegexpTokenizer.__init__(self, r"\s+", gaps=True)


class BlanklineTokenizer(RegexpTokenizer):
    r"""
    Tokenize a string, treating any sequence of blank lines as a delimiter.
    Blank lines are defined as lines containing no characters, except for
    space or tab characters.
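
    For example, the sample string used elsewhere in this module is split at
    its blank line:

        >>> from nltk.tokenize import BlanklineTokenizer
        >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
        >>> BlanklineTokenizer().tokenize(s)
        ['Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.', 'Thanks.']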
    """

    def __init__(self):
        RegexpTokenizer.__init__(self, r"\s*\n\s*\n\s*", gaps=True)


class WordPunctTokenizer(RegexpTokenizer):
    r"""
    Tokenize a text into a sequence of alphabetic and
    non-alphabetic characters, using the regexp ``\w+|[^\w\s]+``.

        >>> from nltk.tokenize import WordPunctTokenizer
        >>> s = "Good muffins cost $3.88\nin New York.  Please buy me\ntwo of them.\n\nThanks."
        >>> WordPunctTokenizer().tokenize(s) # doctest: +NORMALIZE_WHITESPACE
        ['Good', 'muffins', 'cost', '$', '3', '.', '88', 'in', 'New', 'York',
        '.', 'Please', 'buy', 'me', 'two', 'of', 'them', '.', 'Thanks', '.']
    """

    def __init__(self):
        RegexpTokenizer.__init__(self, r"\w+|[^\w\s]+")


def regexp_tokenize(
    text,
    pattern,
    gaps=False,
    discard_empty=True,
    flags=re.UNICODE | re.MULTILINE | re.DOTALL,
):
    """
    Return a tokenized copy of *text*.  See :class:`.RegexpTokenizer`
    for descriptions of the arguments.
    """
    tokenizer = RegexpTokenizer(pattern, gaps, discard_empty, flags)
    return tokenizer.tokenize(text)


blankline_tokenize = BlanklineTokenizer().tokenize
wordpunct_tokenize = WordPunctTokenizer().tokenize