
    g                         d Z ddlZddlZddlZddlmZmZmZ ddl	m
Z
 ddlmZ ddlmZmZmZmZ ddlmZ  G d d	e
      Zej*                  Zej,                  Zej.                  Zej2                  Zy)
z 
Implements ONNX's backend API.
    N)
ModelProtohelperversion)Backend)check_model)InferenceSessionSessionOptionsget_available_providers
get_device)OnnxRuntimeBackendRepc                       e Zd ZdZ e ej                  dd      dk(        Zedd       Z	ed        Z
ed        Zedd       Zedd	       Zedd
       Zy)OnnxRuntimeBackenda  
    Implements
    `ONNX's backend API <https://github.com/onnx/onnx/blob/main/docs/ImplementingAnOnnxBackend.md>`_
    with *ONNX Runtime*.
    The backend is mostly used when you need to switch between
    multiple runtimes with the same API.
    `Importing models from ONNX to Caffe2 <https://github.com/onnx/tutorials/blob/master/tutorials/OnnxCaffe2Import.ipynb>`_
    shows how to use *caffe2* as a backend for a converted model.
    Note: This is not the official Python API.
    ALLOW_RELEASED_ONNX_OPSET_ONLY1Nc                 <    |
t               }| j                  |      S )z
        Return whether the model is compatible with the backend.

        :param model: unused
        :param device: None to use the default device or a string (ex: `'CPU'`)
        :return: boolean
        )r   supports_device)clsmodeldevicekwargss       P/var/www/openai/venv/lib/python3.12/site-packages/onnxruntime/backend/backend.pyis_compatiblez OnnxRuntimeBackend.is_compatible"   s!     >\F""6**    c                    | j                   re|j                  D ]V  }|j                  r|j                  nd}	 ||j                  f}|t        j
                  vrd| d|j                   d}d|fc S X y	# t        $ rF |dk(  r|j                  dkD  s|dk(  r*|j                  dkD  rd| d|j                   d}d|fcY c S Y w xY w)
a  
        Return whether the opset for the model is supported by the backend.
        When By default only released onnx opsets are allowed by the backend
        To test new opsets env variable ALLOW_RELEASED_ONNX_OPSET_ONLY should be set to 0

        :param model: Model whose opsets needed to be verified.
        :return: boolean and error message if opset is not supported.
        zai.onnxzSkipping this test as only released onnx opsets are supported.To run this test set env variable ALLOW_RELEASED_ONNX_OPSET_ONLY to 0. Got Domain 'z' version 'z'.F   z
ai.ommx.ml   )T )allowReleasedOpsetsOnlyopset_importdomainr   r   OP_SET_ID_VERSION_MAPAttributeError)r   r   opsetr    keyerror_messages         r   is_opset_supportedz%OnnxRuntimeBackend.is_opset_supported/   s     &&++).94!5==1C&">">>,,28;u}}oRQ &
  %m33 ?	 ,,  & 
4 )+0BR^H^chcpcpstct,,28;u}}oRQ &
  %m33
4s   6A33ACCc                 (    |dk(  rd}|t               v S )z
        Check whether the backend is compiled with particular device support.
        In particular it's used in the testing suite.
        CUDAGPU)r   )r   r   s     r   r   z"OnnxRuntimeBackend.supports_deviceR   s     VF%%r   c                    t        |t              r|S t        |t              rt        |      S t        |t        t        f      rt               }|j                         D ]  \  }}t        ||      st        |||       ! t        j                  dd      j                  d      }t               D cg c]	  }||vs| }	}t        |||	      }
|
j                          |+| j                  |      st        d| dt!                d       | j"                  |
|fi |S t$        j&                  j)                  t&        j&                        xs t$        j&                  j+                  d	      }|j,                  d
k\  }|r|j/                         n|}t1        |       | j3                  |      \  }}|st5        j6                  |      |}t        |t        t        f      s|j/                         } | j"                  ||fi |S c c}w )a
  
        Load the model and creates a :class:`onnxruntime.InferenceSession`
        ready to be used as a backend.

        :param model: ModelProto (returned by `onnx.load`),
            string for a filename or bytes for a serialized model
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: see :class:`onnxruntime.SessionOptions`
        :return: :class:`onnxruntime.InferenceSession`
        "ORT_ONNX_BACKEND_EXCLUDE_PROVIDERSr   )default,)sess_options	providerszIncompatible device expected 'z', got ''0)   
   r   )
isinstancer   r   strbytesr	   itemshasattrsetattrosgetenvsplitr
   disable_fallbackr   RuntimeErrorr   prepare	packagingr   parseVersionreleaseSerializeToStringr   r&   unittestSkipTest)r   r   r   r   optionskvexcluded_providersxr/   infonnx_version$onnx_supports_serialized_model_checkbin_or_modelopset_supportedr%   bins                    r   r?   zOnnxRuntimeBackend.prepare\   s    e23L/0(//U|,$&G17A&GQ* ' "$+OY[!\!b!bcf!g$;$=_$=q!K]B]$=I_"5w)TC   "!#*=*=f*E"%CF88T^T`Saab#cdd3;;sF5f55 %,,227??CeyGXGXG`G`adGeL3?3G3G:3U08\5224bgL%-0-C-CE-J*O]"''66 CcC<0++-3;;sF5f555 `s   2	H<Hc                 P     | j                   ||fi |} |j                  |fi |S )a  
        Compute the prediction.

        :param model: :class:`onnxruntime.InferenceSession` returned
            by function *prepare*
        :param inputs: inputs
        :param device: requested device for the computation,
            None means the default one which depends on
            the compilation settings
        :param kwargs: see :class:`onnxruntime.RunOptions`
        :return: predictions
        )r?   run)r   r   inputsr   r   reps         r   	run_modelzOnnxRuntimeBackend.run_model   s1     ckk%262swwv(((r   c                     t        d      )z
        This method is not implemented as it is much more efficient
        to run a whole model than every node independently.
        zMIt is much more efficient to run a whole model than every node independently.)NotImplementedError)r   noderT   r   outputs_infor   s         r   run_nodezOnnxRuntimeBackend.run_node   s     ""qrrr   )N)NN)__name__
__module____qualname____doc__boolr:   r;   r   classmethodr   r&   r   r?   rV   r[    r   r   r   r      s    	 #9299-Ms#SWZ#Z[
+ 
+    D & & 26 26h ) )  s sr   r   )r_   r:   rE   packaging.versionr@   onnxr   r   r   onnx.backend.baser   onnx.checkerr   onnxruntimer   r	   r
   r   onnxruntime.backend.backend_repr   r   r   r?   rV   rS   r   rb   r   r   <module>ri      si   
 
   , , % $ ] ] ATs Tsn #00

$
$""$44r   