
"""
Lexical translation model that considers word order.

IBM Model 2 improves on Model 1 by accounting for word order.
An alignment probability is introduced, a(i | j,l,m), which predicts
a source word position, given its aligned target word's position.

The EM algorithm used in Model 2 is:

:E step: In the training data, collect counts, weighted by prior
         probabilities.

         - (a) count how many times a source language word is translated
               into a target language word
         - (b) count how many times a particular position in the source
               sentence is aligned to a particular position in the target
               sentence

:M step: Estimate new probabilities based on the counts from the E step

Notations
---------

:i: Position in the source sentence
     Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
:j: Position in the target sentence
     Valid values are 1, 2, ..., length of target sentence
:l: Number of words in the source sentence, excluding NULL
:m: Number of words in the target sentence
:s: A word in the source language
:t: A word in the target language
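
Up to a constant that depends only on the target sentence length,
Model 2 factors the probability of a target sentence and an alignment,
given the source sentence, into one lexical translation term and one
alignment term per target position j; ``IBMModel2.prob_t_a_given_s``
computes this product::

    P(t, a | s) is proportional to
        product over j = 1..m of t(t[j] | s[a[j]]) * a(a[j] | j, l, m)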

References
----------

Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.

Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
    Ndefaultdict)AlignedSent	AlignmentIBMModel	IBMModel1)Countsc                   T     e Zd ZdZd fd	Zd Zd Zd Zd Zd Z	d Z
d	 Zd
 Z xZS )	IBMModel2u`  
    Lexical translation model that considers word order

    >>> bitext = []
    >>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
    >>> bitext.append(AlignedSent(['das', 'haus', 'ist', 'ja', 'groß'], ['the', 'house', 'is', 'big']))
    >>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
    >>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
    >>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
    >>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))

    >>> ibm2 = IBMModel2(bitext, 5)

    >>> print(round(ibm2.translation_table['buch']['book'], 3))
    1.0
    >>> print(round(ibm2.translation_table['das']['book'], 3))
    0.0
    >>> print(round(ibm2.translation_table['buch'][None], 3))
    0.0
    >>> print(round(ibm2.translation_table['ja'][None], 3))
    0.0

    >>> print(round(ibm2.alignment_table[1][1][2][2], 3))
    0.939
    >>> print(round(ibm2.alignment_table[1][2][2][2], 3))
    0.0
    >>> print(round(ibm2.alignment_table[2][2][4][5], 3))
    1.0

    >>> test_sentence = bitext[2]
    >>> test_sentence.words
    ['das', 'buch', 'ist', 'ja', 'klein']
    >>> test_sentence.mots
    ['the', 'book', 'is', 'small']
    >>> test_sentence.alignment
    Alignment([(0, 0), (1, 1), (2, 2), (3, 2), (4, 3)])
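
    ``alignment_table[i][j][l][m]`` above stores a(i | j,l,m): the
    probability that target position j is aligned to source position i
    (with i = 0 meaning NULL), for a source sentence of length l and a
    target sentence of length m.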

    """

    def __init__(self, sentence_aligned_corpus, iterations, probability_tables=None):
        """
        Train on ``sentence_aligned_corpus`` and create a lexical
        translation model and an alignment model.

        Translation direction is from ``AlignedSent.mots`` to
        ``AlignedSent.words``.

        :param sentence_aligned_corpus: Sentence-aligned parallel corpus
        :type sentence_aligned_corpus: list(AlignedSent)

        :param iterations: Number of iterations to run training algorithm
        :type iterations: int

        :param probability_tables: Optional. Use this to pass in custom
            probability values. If not specified, probabilities will be
            set to a uniform distribution, or some other sensible value.
            If specified, all the following entries must be present:
            ``translation_table``, ``alignment_table``.
            See ``IBMModel`` for the type and purpose of these tables.
        :type probability_tables: dict[str]: object
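
        For example, tables taken from previously trained models can be
        reused like this (a sketch; ``trained_ibm1`` and ``trained_ibm2``
        stand for whatever models supply the tables)::

            tables = {
                "translation_table": trained_ibm1.translation_table,
                "alignment_table": trained_ibm2.alignment_table,
            }
            ibm2 = IBMModel2(bitext, 5, tables)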
        """
        super().__init__(sentence_aligned_corpus)

        if probability_tables is None:
            # Get translation probabilities from IBM Model 1.
            # Run more iterations of training for Model 1, since it is
            # faster than Model 2.
            ibm1 = IBMModel1(sentence_aligned_corpus, 2 * iterations)
            self.translation_table = ibm1.translation_table
            self.set_uniform_probabilities(sentence_aligned_corpus)
        else:
            # Set user-defined probabilities
            self.translation_table = probability_tables["translation_table"]
            self.alignment_table = probability_tables["alignment_table"]

        for n in range(0, iterations):
            self.train(sentence_aligned_corpus)

        self.align_all(sentence_aligned_corpus)

    def set_uniform_probabilities(self, sentence_aligned_corpus):
        # a(i | j,l,m) = 1 / (l+1) for all i, j, l, m
        l_m_combinations = set()
        for aligned_sentence in sentence_aligned_corpus:
            l = len(aligned_sentence.mots)
            m = len(aligned_sentence.words)
            if (l, m) not in l_m_combinations:
                l_m_combinations.add((l, m))
                initial_prob = 1 / (l + 1)
                if initial_prob < IBMModel.MIN_PROB:
                    warnings.warn(
                        "A source sentence is too long ("
                        + str(l)
                        + " words). Results may be less accurate."
                    )

                for i in range(0, l + 1):
                    for j in range(1, m + 1):
                        self.alignment_table[i][j][l][m] = initial_prob

    def train(self, parallel_corpus):
        counts = Model2Counts()
        for aligned_sentence in parallel_corpus:
            src_sentence = [None] + aligned_sentence.mots
            trg_sentence = ["UNUSED"] + aligned_sentence.words  # 1-indexed
            l = len(aligned_sentence.mots)
            m = len(aligned_sentence.words)

            # E step (a): Compute normalization factors to weigh counts
            total_count = self.prob_all_alignments(src_sentence, trg_sentence)

            # E step (b): Collect counts
            for j in range(1, m + 1):
                t = trg_sentence[j]
                for i in range(0, l + 1):
                    s = src_sentence[i]
                    count = self.prob_alignment_point(i, j, src_sentence, trg_sentence)
                    normalized_count = count / total_count[t]

                    counts.update_lexical_translation(normalized_count, s, t)
                    counts.update_alignment(normalized_count, i, j, l, m)

        # M step: Update probabilities with maximum likelihood estimates
        self.maximize_lexical_translation_probabilities(counts)
        self.maximize_alignment_probabilities(counts)

    def maximize_alignment_probabilities(self, counts):
        MIN_PROB = IBMModel.MIN_PROB
        for i, j_s in counts.alignment.items():
            for j, src_sentence_lengths in j_s.items():
                for l, trg_sentence_lengths in src_sentence_lengths.items():
                    for m in trg_sentence_lengths:
                        estimate = (
                            counts.alignment[i][j][l][m]
                            / counts.alignment_for_any_i[j][l][m]
                        )
                        self.alignment_table[i][j][l][m] = max(estimate, MIN_PROB)

    def prob_all_alignments(self, src_sentence, trg_sentence):
        """
        Computes the probability of all possible word alignments,
        expressed as a marginal distribution over target words t

        Each entry in the return value represents the contribution to
        the total alignment probability by the target word t.

        To obtain probability(alignment | src_sentence, trg_sentence),
        simply sum the entries in the return value.

        :return: Probability of t for all s in ``src_sentence``
        :rtype: dict(str): float
        """
        alignment_prob_for_t = defaultdict(float)
        for j in range(1, len(trg_sentence)):
            t = trg_sentence[j]
            for i in range(0, len(src_sentence)):
                alignment_prob_for_t[t] += self.prob_alignment_point(
                    i, j, src_sentence, trg_sentence
                )
        return alignment_prob_for_t

    def prob_alignment_point(self, i, j, src_sentence, trg_sentence):
        """
        Probability that position j in ``trg_sentence`` is aligned to
        position i in the ``src_sentence``
        """
        l = len(src_sentence) - 1
        m = len(trg_sentence) - 1
        s = src_sentence[i]
        t = trg_sentence[j]
        return self.translation_table[t][s] * self.alignment_table[i][j][l][m]

    def prob_t_a_given_s(self, alignment_info):
        """
        Probability of target sentence and an alignment given the
        source sentence
        """
        prob = 1.0
        l = len(alignment_info.src_sentence) - 1
        m = len(alignment_info.trg_sentence) - 1

        for j, i in enumerate(alignment_info.alignment):
            if j == 0:
                continue  # skip the dummy zeroeth element
            trg_word = alignment_info.trg_sentence[j]
            src_word = alignment_info.src_sentence[i]
            prob *= (
                self.translation_table[trg_word][src_word]
                * self.alignment_table[i][j][l][m]
            )

        return max(prob, IBMModel.MIN_PROB)

    def align_all(self, parallel_corpus):
        for sentence_pair in parallel_corpus:
            self.align(sentence_pair)

    def align(self, sentence_pair):
        """
        Determines the best word alignment for one sentence pair from
        the corpus that the model was trained on.

        The best alignment will be set in ``sentence_pair`` when the
        method returns. In contrast with the internal implementation of
        IBM models, the word indices in the ``Alignment`` are zero-
        indexed, not one-indexed.

        :param sentence_pair: A sentence in the source language and its
            counterpart sentence in the target language
        :type sentence_pair: AlignedSent
        """
        best_alignment = []

        l = len(sentence_pair.mots)
        m = len(sentence_pair.words)

        for j, trg_word in enumerate(sentence_pair.words):
            # Initialize trg_word to align with the NULL token
            best_prob = (
                self.translation_table[trg_word][None]
                * self.alignment_table[0][j + 1][l][m]
            )
            best_prob = max(best_prob, IBMModel.MIN_PROB)
            best_alignment_point = None
            for i, src_word in enumerate(sentence_pair.mots):
                align_prob = (
                    self.translation_table[trg_word][src_word]
                    * self.alignment_table[i + 1][j + 1][l][m]
                )
                if align_prob >= best_prob:  # prefer the later source word on ties
                    best_prob = align_prob
                    best_alignment_point = i

            best_alignment.append((j, best_alignment_point))

        sentence_pair.alignment = Alignment(best_alignment)


class Model2Counts(Counts):
    """
    Data object to store counts of various parameters during training.
    Includes counts for alignment.
    """

    def __init__(self):
        super().__init__()
        self.alignment = defaultdict(
            lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
        )
        self.alignment_for_any_i = defaultdict(
            lambda: defaultdict(lambda: defaultdict(float))
        )

    def update_lexical_translation(self, count, s, t):
        self.t_given_s[t][s] += count
        self.any_t_given_s[s] += count

    def update_alignment(self, count, i, j, l, m):
        self.alignment[i][j][l][m] += count
        self.alignment_for_any_i[j][l][m] += count