
"""BLEU score implementation."""

import math
import sys
import warnings
from collections import Counter
from fractions import Fraction as _Fraction

from nltk.util import ngrams


class Fraction(_Fraction):
    """Fraction with _normalize=False support for 3.12"""

    def __new__(cls, numerator=0, denominator=None, _normalize=False):
        if sys.version_info >= (3, 12):
            # Python 3.12 removed the private _normalize argument.
            self = super().__new__(cls, numerator, denominator)
        else:
            self = super().__new__(cls, numerator, denominator, _normalize=_normalize)
        # Keep the raw counts so corpus_bleu() can sum numerators and
        # denominators across segments without reduction.
        self._normalize = _normalize
        self._original_numerator = numerator
        self._original_denominator = denominator
        return self

    @property
    def numerator(self):
        if not self._normalize:
            return self._original_numerator
        return super().numerator

    @property
    def denominator(self):
        if not self._normalize:
            return self._original_denominator
        return super().denominator


def sentence_bleu(
    references,
    hypothesis,
    weights=(0.25, 0.25, 0.25, 0.25),
    smoothing_function=None,
    auto_reweigh=False,
):
    """
    Calculate BLEU score (Bilingual Evaluation Understudy) from
    Papineni, Kishore, Salim Roukos, Todd Ward, and Wei-Jing Zhu. 2002.
    "BLEU: a method for automatic evaluation of machine translation."
    In Proceedings of ACL. https://www.aclweb.org/anthology/P02-1040.pdf

    >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...               'ensures', 'that', 'the', 'military', 'always',
    ...               'obeys', 'the', 'commands', 'of', 'the', 'party']

    >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
    ...               'forever', 'hearing', 'the', 'activity', 'guidebook',
    ...               'that', 'party', 'direct']

    >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...               'ensures', 'that', 'the', 'military', 'will', 'forever',
    ...               'heed', 'Party', 'commands']

    >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...               'guarantees', 'the', 'military', 'forces', 'always',
    ...               'being', 'under', 'the', 'command', 'of', 'the',
    ...               'Party']

    >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...               'army', 'always', 'to', 'heed', 'the', 'directions',
    ...               'of', 'the', 'party']

    >>> sentence_bleu([reference1, reference2, reference3], hypothesis1) # doctest: +ELLIPSIS
    0.5045...

    If there is no n-gram overlap for any order of n-grams, BLEU returns the
    value 0. This is because the precision for the order of n-grams without
    overlap is 0, and the geometric mean in the final BLEU score computation
    multiplies the 0 with the precision of other n-grams. This results in 0
    (independently of the precision of the other n-gram orders). The following
    example has zero 3-gram and 4-gram overlaps:

    >>> round(sentence_bleu([reference1, reference2, reference3], hypothesis2),4) # doctest: +ELLIPSIS
    0.0

    To avoid this harsh behaviour when no n-gram overlaps are found, a smoothing
    function can be used.

    >>> chencherry = SmoothingFunction()
    >>> sentence_bleu([reference1, reference2, reference3], hypothesis2,
    ...     smoothing_function=chencherry.method1) # doctest: +ELLIPSIS
    0.0370...

    The default BLEU calculates a score for up to 4-grams using uniform
    weights (this is called BLEU-4). To evaluate your translations with
    higher/lower order ngrams, use customized weights. E.g. when accounting
    for up to 5-grams with uniform weights (this is called BLEU-5) use:

    >>> weights = (1./5., 1./5., 1./5., 1./5., 1./5.)
    >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
    0.3920...

    Multiple BLEU scores can be computed at once, by supplying a list of weights.
    E.g. for computing BLEU-2, BLEU-3 *and* BLEU-4 in one computation, use:
    >>> weights = [
    ...     (1./2., 1./2.),
    ...     (1./3., 1./3., 1./3.),
    ...     (1./4., 1./4., 1./4., 1./4.)
    ... ]
    >>> sentence_bleu([reference1, reference2, reference3], hypothesis1, weights) # doctest: +ELLIPSIS
    [0.7453..., 0.6240..., 0.5045...]

    :param references: reference sentences
    :type references: list(list(str))
    :param hypothesis: a hypothesis sentence
    :type hypothesis: list(str)
    :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights)
    :type weights: tuple(float) / list(tuple(float))
    :param smoothing_function:
    :type smoothing_function: SmoothingFunction
    :param auto_reweigh: Option to re-normalize the weights uniformly.
    :type auto_reweigh: bool
    :return: The sentence-level BLEU score. Returns a list if multiple weights were supplied.
    :rtype: float / list(float)
    """
    return corpus_bleu(
        [references], [hypothesis], weights, smoothing_function, auto_reweigh
    )


def corpus_bleu(
    list_of_references,
    hypotheses,
    weights=(0.25, 0.25, 0.25, 0.25),
    smoothing_function=None,
    auto_reweigh=False,
):
    """
    Calculate a single corpus-level BLEU score (aka. system-level BLEU) for all
    the hypotheses and their respective references.

    Instead of averaging the sentence level BLEU scores (i.e. macro-average
    precision), the original BLEU metric (Papineni et al. 2002) accounts for
    the micro-average precision (i.e. summing the numerators and denominators
    for each hypothesis-reference(s) pair before the division).
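    The final score is the brevity penalty multiplied by a weighted geometric
    mean of these modified precisions, i.e. BLEU = BP * exp(sum_n w_n * log p_n).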

    >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
    ...         'ensures', 'that', 'the', 'military', 'always',
    ...         'obeys', 'the', 'commands', 'of', 'the', 'party']
    >>> ref1a = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
    ...          'ensures', 'that', 'the', 'military', 'will', 'forever',
    ...          'heed', 'Party', 'commands']
    >>> ref1b = ['It', 'is', 'the', 'guiding', 'principle', 'which',
    ...          'guarantees', 'the', 'military', 'forces', 'always',
    ...          'being', 'under', 'the', 'command', 'of', 'the', 'Party']
    >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
    ...          'army', 'always', 'to', 'heed', 'the', 'directions',
    ...          'of', 'the', 'party']

    >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
    ...         'interested', 'in', 'world', 'history']
    >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
    ...          'because', 'he', 'read', 'the', 'book']

    >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
    >>> hypotheses = [hyp1, hyp2]
    >>> corpus_bleu(list_of_references, hypotheses) # doctest: +ELLIPSIS
    0.5920...

    The example below shows that corpus_bleu() is different from averaging
    sentence_bleu() for hypotheses

    >>> score1 = sentence_bleu([ref1a, ref1b, ref1c], hyp1)
    >>> score2 = sentence_bleu([ref2a], hyp2)
    >>> (score1 + score2) / 2 # doctest: +ELLIPSIS
    0.6223...

    Custom weights may be supplied to fine-tune the BLEU score further.
    A tuple of float weights for unigrams, bigrams, trigrams and so on can be given.
    >>> weights = (0.1, 0.3, 0.5, 0.1)
    >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS
    0.5818...

    This particular weighting gives extra importance to trigrams.
    Furthermore, multiple weights can be given, resulting in multiple BLEU scores.
    >>> weights = [
    ...     (0.5, 0.5),
    ...     (0.333, 0.333, 0.334),
    ...     (0.25, 0.25, 0.25, 0.25),
    ...     (0.2, 0.2, 0.2, 0.2, 0.2)
    ... ]
    >>> corpus_bleu(list_of_references, hypotheses, weights=weights) # doctest: +ELLIPSIS
    [0.8242..., 0.7067..., 0.5920..., 0.4719...]

    :param list_of_references: a corpus of lists of reference sentences, w.r.t. hypotheses
    :type list_of_references: list(list(list(str)))
    :param hypotheses: a list of hypothesis sentences
    :type hypotheses: list(list(str))
    :param weights: weights for unigrams, bigrams, trigrams and so on (one or a list of weights)
    :type weights: tuple(float) / list(tuple(float))
    :param smoothing_function:
    :type smoothing_function: SmoothingFunction
    :param auto_reweigh: Option to re-normalize the weights uniformly.
    :type auto_reweigh: bool
    :return: The corpus-level BLEU score.
    :rtype: float
    """
    # Before proceeding to compute BLEU, perform sanity checks.
    p_numerators = Counter()  # Key = ngram order, value = no. of clipped ngram matches.
    p_denominators = Counter()  # Key = ngram order, value = no. of ngrams in hypotheses.
    hyp_lengths, ref_lengths = 0, 0

    assert len(list_of_references) == len(hypotheses), (
        "The number of hypotheses and their reference(s) should be the same "
    )

    # Accept a single weight tuple as well as a list of weight tuples.
    try:
        weights[0][0]
    except TypeError:
        weights = [weights]
    max_weight_length = max(len(weight) for weight in weights)

    # Iterate through each hypothesis and its corresponding references.
    for references, hypothesis in zip(list_of_references, hypotheses):
        # For each order of ngram, calculate the numerator and
        # denominator for the corpus-level modified precision.
        for i in range(1, max_weight_length + 1):
            p_i = modified_precision(references, hypothesis, i)
            p_numerators[i] += p_i.numerator
            p_denominators[i] += p_i.denominator

        # Calculate the hypothesis length and the closest reference length,
        # and add them to the corpus-level hypothesis and reference counts.
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        ref_lengths += closest_ref_length(references, hyp_len)

    # Calculate the corpus-level brevity penalty.
    bp = brevity_penalty(ref_lengths, hyp_lengths)

    # Collect the various precision values for the different ngram orders.
    p_n = [
        Fraction(p_numerators[i], p_denominators[i], _normalize=False)
        for i in range(1, max_weight_length + 1)
    ]

    # Return 0 if there are no matching n-grams.  It suffices to check the
    # unigram counts: without unigram matches there are no higher-order matches.
    if p_numerators[1] == 0:
        return 0 if len(weights) == 1 else [0] * len(weights)

    # If there's no smoothing function, use method0 (no smoothing).
    if not smoothing_function:
        smoothing_function = SmoothingFunction().method0
    # Smooth the modified precisions; depending on the method this may
    # convert the Fraction objects into floats.
    p_n = smoothing_function(
        p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths
    )

    bleu_scores = []
    for weight in weights:
        # Uniformly re-weigh based on the hypothesis length if the hypotheses
        # are shorter than 4 tokens and the default weights are used.
        if auto_reweigh:
            if hyp_lengths < 4 and weight == (0.25, 0.25, 0.25, 0.25):
                weight = (1 / hyp_lengths,) * hyp_lengths

        # BLEU = BP * exp(sum_n w_n * log p_n); orders with p_n == 0 are
        # skipped here because smoothing (or the early return above) handles them.
        s = (w_i * math.log(p_i) for w_i, p_i in zip(weight, p_n) if p_i > 0)
        s = bp * math.exp(math.fsum(s))
        bleu_scores.append(s)
    return bleu_scores[0] if len(weights) == 1 else bleu_scores


def modified_precision(references, hypothesis, n):
    """
    Calculate modified ngram precision.

    The normal precision method may lead to some wrong translations with
    high-precision, e.g., the translation, in which a word of reference
    repeats several times, has very high precision.

    This function only returns the Fraction object that contains the numerator
    and denominator necessary to calculate the corpus-level precision.
    To calculate the modified precision for a single pair of hypothesis and
    references, cast the Fraction object into a float.

    The famous "the the the ... " example shows that ordinary precision can be
    inflated simply by duplicating high-frequency words.

        >>> reference1 = 'the cat is on the mat'.split()
        >>> reference2 = 'there is a cat on the mat'.split()
        >>> hypothesis1 = 'the the the the the the the'.split()
        >>> references = [reference1, reference2]
        >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
        0.2857...
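
    Here only two of the seven hypothesis unigrams are credited: the count of
    'the' is clipped to its maximum reference count (2, in reference1), giving
    a modified unigram precision of 2/7.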

    In the modified n-gram precision, a reference word will be considered
    exhausted after a matching hypothesis word is identified, e.g.

        >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...               'ensures', 'that', 'the', 'military', 'will',
        ...               'forever', 'heed', 'Party', 'commands']
        >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...               'guarantees', 'the', 'military', 'forces', 'always',
        ...               'being', 'under', 'the', 'command', 'of', 'the',
        ...               'Party']
        >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...               'army', 'always', 'to', 'heed', 'the', 'directions',
        ...               'of', 'the', 'party']
        >>> hypothesis = 'of the'.split()
        >>> references = [reference1, reference2, reference3]
        >>> float(modified_precision(references, hypothesis, n=1))
        1.0
        >>> float(modified_precision(references, hypothesis, n=2))
        1.0
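
    Every n-gram of this short hypothesis also occurs in at least one
    reference, so both the unigram and the bigram precisions are 1.0.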

    An example of a normal machine translation hypothesis:

        >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
        ...               'ensures', 'that', 'the', 'military', 'always',
        ...               'obeys', 'the', 'commands', 'of', 'the', 'party']

        >>> hypothesis2 = ['It', 'is', 'to', 'insure', 'the', 'troops',
        ...               'forever', 'hearing', 'the', 'activity', 'guidebook',
        ...               'that', 'party', 'direct']

        >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
        ...               'ensures', 'that', 'the', 'military', 'will',
        ...               'forever', 'heed', 'Party', 'commands']

        >>> reference2 = ['It', 'is', 'the', 'guiding', 'principle', 'which',
        ...               'guarantees', 'the', 'military', 'forces', 'always',
        ...               'being', 'under', 'the', 'command', 'of', 'the',
        ...               'Party']

        >>> reference3 = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
        ...               'army', 'always', 'to', 'heed', 'the', 'directions',
        ...               'of', 'the', 'party']
        >>> references = [reference1, reference2, reference3]
        >>> float(modified_precision(references, hypothesis1, n=1)) # doctest: +ELLIPSIS
        0.9444...
        >>> float(modified_precision(references, hypothesis2, n=1)) # doctest: +ELLIPSIS
        0.5714...
        >>> float(modified_precision(references, hypothesis1, n=2)) # doctest: +ELLIPSIS
        0.5882352941176471
        >>> float(modified_precision(references, hypothesis2, n=2)) # doctest: +ELLIPSIS
        0.07692...


    :param references: A list of reference translations.
    :type references: list(list(str))
    :param hypothesis: A hypothesis translation.
    :type hypothesis: list(str)
    :param n: The ngram order.
    :type n: int
    :return: BLEU's modified precision for the nth order ngram.
    :rtype: Fraction
    """
    # Extract all ngrams from the hypothesis;
    # use an empty Counter if the hypothesis is too short.
    counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
    # Extract a union of the references' ngram counts.
    max_counts = {}
    for reference in references:
        reference_counts = (
            Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
        )
        for ngram in counts:
            max_counts[ngram] = max(max_counts.get(ngram, 0), reference_counts[ngram])

    # Clip each hypothesis ngram count by its maximum reference count.
    clipped_counts = {
        ngram: min(count, max_counts[ngram]) for ngram, count in counts.items()
    }

    numerator = sum(clipped_counts.values())
    # Ensure that the denominator is at least 1 to avoid ZeroDivisionError.
    # Usually this happens when the ngram order is > len(reference).
    denominator = max(1, sum(counts.values()))

    return Fraction(numerator, denominator, _normalize=False)


def closest_ref_length(references, hyp_len):
    """
    This function finds the reference whose length is closest to the length of
    the hypothesis. The closest reference length corresponds to the *r*
    variable in the brevity penalty formula of Papineni et al. (2002).
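
    For example, with reference lengths 13 and 11 and a hypothesis of length
    12, both references are equally close and the shorter one is chosen:

    >>> closest_ref_length([['a'] * 13, ['a'] * 11], 12)
    11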

    :param references: A list of reference translations.
    :type references: list(list(str))
    :param hyp_len: The length of the hypothesis.
    :type hyp_len: int
    :return: The length of the reference that's closest to the hypothesis.
    :rtype: int
    """
    ref_lens = (len(reference) for reference in references)
    # Pick the reference length closest to the hypothesis length;
    # ties are resolved in favour of the shorter reference.
    closest_ref_len = min(
        ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len)
    )
    return closest_ref_len


def brevity_penalty(closest_ref_len, hyp_len):
    """
    Calculate brevity penalty.

    Since the modified n-gram precision alone does not penalize hypotheses that
    are too short, a brevity penalty is used to adjust the overall BLEU score
    according to length.

    An example from the paper: there are three references of lengths 12, 15
    and 17, and a concise hypothesis of length 12. The brevity penalty is 1.

    >>> reference1 = list('aaaaaaaaaaaa')      # i.e. ['a'] * 12
    >>> reference2 = list('aaaaaaaaaaaaaaa')   # i.e. ['a'] * 15
    >>> reference3 = list('aaaaaaaaaaaaaaaaa') # i.e. ['a'] * 17
    >>> hypothesis = list('aaaaaaaaaaaa')      # i.e. ['a'] * 12
    >>> references = [reference1, reference2, reference3]
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(references, hyp_len)
    >>> brevity_penalty(closest_ref_len, hyp_len)
    1.0

    In case a hypothesis translation is shorter than the references, a penalty
    is applied.
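    The penalty is exp(1 - r / c), where *r* is the closest reference length
    and *c* is the hypothesis length; it equals 1 whenever the hypothesis is
    at least as long as the closest reference.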

    >>> references = [['a'] * 28, ['a'] * 28]
    >>> hypothesis = ['a'] * 12
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(references, hyp_len)
    >>> brevity_penalty(closest_ref_len, hyp_len)
    0.2635971381157267
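
    This value is exp(1 - 28 / 12) = exp(-4/3), following the brevity penalty
    formula with r = 28 and c = 12.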

    The length of the closest reference is used to compute the penalty. If the
    length of a hypothesis is 12, and the reference lengths are 13 and 2, the
    penalty is applied because the hypothesis length (12) is less than the
    closest reference length (13).

    >>> references = [['a'] * 13, ['a'] * 2]
    >>> hypothesis = ['a'] * 12
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(references, hyp_len)
    >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
    0.9200...

    The brevity penalty doesn't depend on reference order. More importantly,
    when two reference sentences are at the same distance, the shortest
    reference sentence length is used.

    >>> references = [['a'] * 13, ['a'] * 11]
    >>> hypothesis = ['a'] * 12
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(references, hyp_len)
    >>> bp1 = brevity_penalty(closest_ref_len, hyp_len)
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(reversed(references), hyp_len)
    >>> bp2 = brevity_penalty(closest_ref_len, hyp_len)
    >>> bp1 == bp2 == 1
    True

    A test example from mteval-v13a.pl (starting from the line 705):

    >>> references = [['a'] * 11, ['a'] * 8]
    >>> hypothesis = ['a'] * 7
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(references, hyp_len)
    >>> brevity_penalty(closest_ref_len, hyp_len) # doctest: +ELLIPSIS
    0.8668...

    >>> references = [['a'] * 11, ['a'] * 8, ['a'] * 6, ['a'] * 7]
    >>> hypothesis = ['a'] * 7
    >>> hyp_len = len(hypothesis)
    >>> closest_ref_len =  closest_ref_length(references, hyp_len)
    >>> brevity_penalty(closest_ref_len, hyp_len)
    1.0

    :param hyp_len: The length of the hypothesis for a single sentence OR the
        sum of all the hypotheses' lengths for a corpus
    :type hyp_len: int
    :param closest_ref_len: The length of the closest reference for a single
        hypothesis OR the sum of all the closest references for every hypothesis.
    :type closest_ref_len: int
    :return: BLEU's brevity penalty.
    :rtype: float
    """
    if hyp_len > closest_ref_len:
        return 1
    # If the hypothesis is empty, brevity penalty = 0 should result in BLEU = 0.0
    elif hyp_len == 0:
        return 0
    else:
        return math.exp(1 - closest_ref_len / hyp_len)


class SmoothingFunction:
    """
    This is an implementation of the smoothing techniques
    for segment-level BLEU scores that was presented in
    Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
    Smoothing Techniques for Sentence-Level BLEU. In WMT14.
    http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
    """

    def __init__(self, epsilon=0.1, alpha=5, k=5):
        """
        This will initialize the parameters required for the various smoothing
        techniques; the default values are set to the numbers used in the
        experiments from Chen and Cherry (2014).

        >>> hypothesis1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which', 'ensures',
        ...                 'that', 'the', 'military', 'always', 'obeys', 'the',
        ...                 'commands', 'of', 'the', 'party']
        >>> reference1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'that', 'ensures',
        ...               'that', 'the', 'military', 'will', 'forever', 'heed',
        ...               'Party', 'commands']

        >>> chencherry = SmoothingFunction()
        >>> print(sentence_bleu([reference1], hypothesis1)) # doctest: +ELLIPSIS
        0.4118...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method0)) # doctest: +ELLIPSIS
        0.4118...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method1)) # doctest: +ELLIPSIS
        0.4118...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method2)) # doctest: +ELLIPSIS
        0.4452...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method3)) # doctest: +ELLIPSIS
        0.4118...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method4)) # doctest: +ELLIPSIS
        0.4118...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method5)) # doctest: +ELLIPSIS
        0.4905...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method6)) # doctest: +ELLIPSIS
        0.4135...
        >>> print(sentence_bleu([reference1], hypothesis1, smoothing_function=chencherry.method7)) # doctest: +ELLIPSIS
        0.4905...

        :param epsilon: the epsilon value used in method 1
        :type epsilon: float
        :param alpha: the alpha value used in method 6
        :type alpha: int
        :param k: the k value used in method 4
        :type k: int
        """
        self.epsilon = epsilon
        self.alpha = alpha
        self.k = k

    def method0(self, p_n, *args, **kwargs):
        """
        No smoothing.
        """
        p_n_new = []
        for i, p_i in enumerate(p_n):
            if p_i.numerator != 0:
                p_n_new.append(p_i)
            else:
                _msg = str(
                    "\nThe hypothesis contains 0 counts of {}-gram overlaps.\n"
                    "Therefore the BLEU score evaluates to 0, independently of\n"
                    "how many N-gram overlaps of lower order it contains.\n"
                    "Consider using lower n-gram order or use "
                    "SmoothingFunction()"
                ).format(i + 1)
                warnings.warn(_msg)
                # The precision for this order is 0 (or undefined).  Because
                # BLEU combines the precisions with a geometric mean in log
                # space, return sys.float_info.min instead of 0 so that
                # math.log() is defined and the overall score evaluates to 0.
                p_n_new.append(sys.float_info.min)
        return p_n_new

    def method1(self, p_n, *args, **kwargs):
        """
        Smoothing method 1: Add *epsilon* counts to precision with 0 counts.
        """
        return [
            (p_i.numerator + self.epsilon) / p_i.denominator
            if p_i.numerator == 0
            else p_i
            for p_i in p_n
        ]

    def method2(self, p_n, *args, **kwargs):
        """
        Smoothing method 2: Add 1 to both numerator and denominator from
        Chin-Yew Lin and Franz Josef Och (2004) ORANGE: a Method for
        Evaluating Automatic Evaluation Metrics for Machine Translation.
        In COLING 2004.
        """
        return [
            Fraction(p_n[i].numerator + 1, p_n[i].denominator + 1, _normalize=False)
            if i != 0
            else p_n[0]
            for i in range(len(p_n))
        ]

    def method3(self, p_n, *args, **kwargs):
        """
        Smoothing method 3: NIST geometric sequence smoothing
        The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each
        precision score whose matching n-gram count is null.
        k is 1 for the first 'n' value for which the n-gram match count is null.

        For example, if the text contains:

        - one 2-gram match
        - and (consequently) two 1-gram matches

        the n-gram count for each individual precision score would be:

        - n=1  =>  prec_count = 2     (two unigrams)
        - n=2  =>  prec_count = 1     (one bigram)
        - n=3  =>  prec_count = 1/2   (no trigram,  taking 'smoothed' value of 1 / ( 2^k ), with k=1)
        - n=4  =>  prec_count = 1/4   (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)
        """
        incvnt = 1  # In mteval-v13a.pl this counter is referred to as k.
        for i, p_i in enumerate(p_n):
            if p_i.numerator == 0:
                p_n[i] = 1 / (2**incvnt * p_i.denominator)
                incvnt += 1
        return p_n

    def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 4:
        Shorter translations may have inflated precision values due to having
        smaller denominators; therefore, we give them proportionally
        smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry
        suggest dividing by 1/ln(len(T)), where T is the length of the translation.
        """
        incvnt = 1
        hyp_len = hyp_len if hyp_len else len(hypothesis)
        for i, p_i in enumerate(p_n):
            if p_i.numerator == 0 and hyp_len > 1:
                numerator = 1 / (2**incvnt * self.k / math.log(hyp_len))
                p_n[i] = numerator / p_i.denominator
                incvnt += 1
        return p_n

    def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 5:
        The matched counts for similar values of n should be similar. To
        calculate the n-gram matched count, it averages the n−1, n and n+1 gram
        matched counts.
        """
        hyp_len = hyp_len if hyp_len else len(hypothesis)
        m = {}
        # Requires a precision value for one additional ngram order.
        p_n_plus1 = p_n + [modified_precision(references, hypothesis, 5)]
        m[-1] = p_n[0] + 1
        for i, p_i in enumerate(p_n):
            p_n[i] = (m[i - 1] + p_i + p_n_plus1[i + 1]) / 3
            m[i] = p_n[i]
        return p_n

    def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 6:
        Interpolates the maximum likelihood estimate of the precision *p_n* with
        a prior estimate *pi0*. The prior is estimated by assuming that the ratio
        between pn and pn−1 will be the same as that between pn−1 and pn−2; from
        Gao and He (2013) Training MRF-Based Phrase Translation Models using
        Gradient Ascent. In NAACL.
        """
        hyp_len = hyp_len if hyp_len else len(hypothesis)
        # This smoothing only works when p_1 and p_2 are non-zero.
        # Raise an error with an appropriate message when the input is too
        # short to use this smoothing technique.
        assert p_n[2], "This smoothing method requires non-zero precision for bigrams."
        for i, p_i in enumerate(p_n):
            if i in [0, 1]:  # Skips the first 2 orders of ngrams.
                continue
            else:
                pi0 = 0 if p_n[i - 2] == 0 else p_n[i - 1] ** 2 / p_n[i - 2]
                # No. of ngrams in the translation that match the reference.
                m = p_i.numerator
                # No. of ngrams in the translation.
                l = sum(1 for _ in ngrams(hypothesis, i + 1))
                # Calculates the interpolated precision.
                p_n[i] = (m + self.alpha * pi0) / (l + self.alpha)
        return p_n

    def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """
        Smoothing method 7:
        Interpolates methods 4 and 5.
        """
        hyp_len = hyp_len if hyp_len else len(hypothesis)
        p_n = self.method4(p_n, references, hypothesis, hyp_len)
        p_n = self.method5(p_n, references, hypothesis, hyp_len)
        return p_n