/* Generated by Cython 3.2.2 */

/* BEGIN: Cython Metadata
{
    "distutils": {
        "depends": [],
        "extra_compile_args": [
            "-std=c++14",
            "-fpermissive",
            "-Wno-deprecated-declarations",
            "-fno-var-tracking-assignments",
            "-O3"
        ],
        "include_dirs": [
            "/opt/python/cp310-cp310/include",
            "/host//home/runner/_work/cuda-python/cuda-python/cuda_toolkit/include"
        ],
        "language": "c++",
        "library_dirs": [
            "/tmp/build-env-yyn641v7/lib/python3.10/site-packages",
            "/tmp/build-env-yyn641v7/lib",
            "/host//home/runner/_work/cuda-python/cuda-python/cuda_toolkit/lib64",
            "/host//home/runner/_work/cuda-python/cuda-python/cuda_toolkit/lib"
        ],
        "name": "cuda.bindings.cy_nvml",
        "sources": [
            "cuda/bindings/cy_nvml.pyx"
        ]
    },
    "module_name": "cuda.bindings.cy_nvml"
}
END: Cython Metadata */

#ifndef PY_SSIZE_T_CLEAN
#define PY_SSIZE_T_CLEAN
#endif /* PY_SSIZE_T_CLEAN */
/* InitLimitedAPI */
#if defined(Py_LIMITED_API)
  #if !defined(CYTHON_LIMITED_API)
  #define CYTHON_LIMITED_API 1
  #endif
#elif defined(CYTHON_LIMITED_API)
  #ifdef _MSC_VER
  #pragma message ("Limited API usage is enabled with 'CYTHON_LIMITED_API' but 'Py_LIMITED_API' does not define a Python target version. Consider setting 'Py_LIMITED_API' instead.")
  #else
  #warning Limited API usage is enabled with 'CYTHON_LIMITED_API' but 'Py_LIMITED_API' does not define a Python target version. Consider setting 'Py_LIMITED_API' instead.
  #endif
#endif

#include "Python.h"
#ifndef Py_PYTHON_H
    #error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x03080000
    #error Cython requires Python 3.8+.
#else
#define __PYX_ABI_VERSION "3_2_2"
#define CYTHON_HEX_VERSION 0x030202F0
#define CYTHON_FUTURE_DIVISION 1
/* CModulePreamble */
#include <stddef.h>
#ifndef offsetof
  #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS)
  #ifndef __stdcall
    #define __stdcall
  #endif
  #ifndef __cdecl
    #define __cdecl
  #endif
  #ifndef __fastcall
    #define __fastcall
  #endif
#endif
#ifndef DL_IMPORT
  #define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
  #define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef PY_LONG_LONG
  #define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
  #define Py_HUGE_VAL HUGE_VAL
#endif
#define __PYX_LIMITED_VERSION_HEX PY_VERSION_HEX
#if defined(GRAALVM_PYTHON)
  /* For very preliminary testing purposes. Most variables are set the same as PyPy.
     The existence of this section does not imply that anything works or is even tested */
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #define CYTHON_COMPILING_IN_LIMITED_API 0
  #define CYTHON_COMPILING_IN_GRAAL 1
  #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 0
  #undef CYTHON_USE_TYPE_SPECS
  #define CYTHON_USE_TYPE_SPECS 0
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #undef CYTHON_AVOID_BORROWED_REFS
  #define CYTHON_AVOID_BORROWED_REFS 1
  #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
  #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #undef CYTHON_ASSUME_SAFE_SIZE
  #define CYTHON_ASSUME_SAFE_SIZE 0
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_GIL
  #define CYTHON_FAST_GIL 0
  #undef CYTHON_METH_FASTCALL
  #define CYTHON_METH_FASTCALL 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #undef CYTHON_PEP489_MULTI_PHASE_INIT
  #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #undef CYTHON_USE_MODULE_STATE
  #define CYTHON_USE_MODULE_STATE 0
  #undef CYTHON_USE_SYS_MONITORING
  #define CYTHON_USE_SYS_MONITORING 0
  #undef CYTHON_USE_TP_FINALIZE
  #define CYTHON_USE_TP_FINALIZE 0
  #undef CYTHON_USE_AM_SEND
  #define CYTHON_USE_AM_SEND 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 1
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
  #endif
  #undef CYTHON_USE_FREELISTS
  #define CYTHON_USE_FREELISTS 0
  #undef CYTHON_IMMORTAL_CONSTANTS
  #define CYTHON_IMMORTAL_CONSTANTS 0
#elif defined(PYPY_VERSION)
  #define CYTHON_COMPILING_IN_PYPY 1
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #define CYTHON_COMPILING_IN_LIMITED_API 0
  #define CYTHON_COMPILING_IN_GRAAL 0
  #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 1
  #ifndef CYTHON_USE_TYPE_SPECS
    #define CYTHON_USE_TYPE_SPECS 0
  #endif
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #undef CYTHON_USE_UNICODE_WRITER
  #define CYTHON_USE_UNICODE_WRITER 0
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #undef CYTHON_AVOID_BORROWED_REFS
  #define CYTHON_AVOID_BORROWED_REFS 1
  #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
  #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 1
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #ifndef CYTHON_ASSUME_SAFE_SIZE
    #define CYTHON_ASSUME_SAFE_SIZE 1
  #endif
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_GIL
  #define CYTHON_FAST_GIL 0
  #undef CYTHON_METH_FASTCALL
  #define CYTHON_METH_FASTCALL 0
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #if PY_VERSION_HEX < 0x03090000
    #undef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT 0
  #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT)
    #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #endif
  #undef CYTHON_USE_MODULE_STATE
  #define CYTHON_USE_MODULE_STATE 0
  #undef CYTHON_USE_SYS_MONITORING
  #define CYTHON_USE_SYS_MONITORING 0
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE (PYPY_VERSION_NUM >= 0x07030C00)
  #endif
  #undef CYTHON_USE_AM_SEND
  #define CYTHON_USE_AM_SEND 0
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC (PYPY_VERSION_NUM >= 0x07031100)
  #endif
  #undef CYTHON_USE_FREELISTS
  #define CYTHON_USE_FREELISTS 0
  #undef CYTHON_IMMORTAL_CONSTANTS
  #define CYTHON_IMMORTAL_CONSTANTS 0
#elif defined(CYTHON_LIMITED_API)
  #ifdef Py_LIMITED_API
    #undef __PYX_LIMITED_VERSION_HEX
    #define __PYX_LIMITED_VERSION_HEX Py_LIMITED_API
  #endif
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_CPYTHON 0
  #define CYTHON_COMPILING_IN_LIMITED_API 1
  #define CYTHON_COMPILING_IN_GRAAL 0
  #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #undef CYTHON_USE_TYPE_SLOTS
  #define CYTHON_USE_TYPE_SLOTS 0
  #undef CYTHON_USE_TYPE_SPECS
  #define CYTHON_USE_TYPE_SPECS 1
  #undef CYTHON_USE_PYTYPE_LOOKUP
  #define CYTHON_USE_PYTYPE_LOOKUP 0
  #undef CYTHON_USE_PYLIST_INTERNALS
  #define CYTHON_USE_PYLIST_INTERNALS 0
  #undef CYTHON_USE_UNICODE_INTERNALS
  #define CYTHON_USE_UNICODE_INTERNALS 0
  #ifndef CYTHON_USE_UNICODE_WRITER
    #define CYTHON_USE_UNICODE_WRITER 0
  #endif
  #undef CYTHON_USE_PYLONG_INTERNALS
  #define CYTHON_USE_PYLONG_INTERNALS 0
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
    #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
  #endif
  #undef CYTHON_ASSUME_SAFE_MACROS
  #define CYTHON_ASSUME_SAFE_MACROS 0
  #undef CYTHON_ASSUME_SAFE_SIZE
  #define CYTHON_ASSUME_SAFE_SIZE 0
  #undef CYTHON_UNPACK_METHODS
  #define CYTHON_UNPACK_METHODS 0
  #undef CYTHON_FAST_THREAD_STATE
  #define CYTHON_FAST_THREAD_STATE 0
  #undef CYTHON_FAST_GIL
  #define CYTHON_FAST_GIL 0
  #undef CYTHON_METH_FASTCALL
  #define CYTHON_METH_FASTCALL (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
  #undef CYTHON_FAST_PYCALL
  #define CYTHON_FAST_PYCALL 0
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #endif
  #ifndef CYTHON_USE_MODULE_STATE
    #define CYTHON_USE_MODULE_STATE 0
  #endif
  #undef CYTHON_USE_SYS_MONITORING
  #define CYTHON_USE_SYS_MONITORING 0
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE 0
  #endif
  #ifndef CYTHON_USE_AM_SEND
    #define CYTHON_USE_AM_SEND (__PYX_LIMITED_VERSION_HEX >= 0x030A0000)
  #endif
  #undef CYTHON_USE_DICT_VERSIONS
  #define CYTHON_USE_DICT_VERSIONS 0
  #undef CYTHON_USE_EXC_INFO_STACK
  #define CYTHON_USE_EXC_INFO_STACK 0
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC 0
  #endif
  #ifndef CYTHON_USE_FREELISTS
  #define CYTHON_USE_FREELISTS 1
  #endif
  #undef CYTHON_IMMORTAL_CONSTANTS
  #define CYTHON_IMMORTAL_CONSTANTS 0
#else
  #define CYTHON_COMPILING_IN_PYPY 0
  #define CYTHON_COMPILING_IN_CPYTHON 1
  #define CYTHON_COMPILING_IN_LIMITED_API 0
  #define CYTHON_COMPILING_IN_GRAAL 0
  #ifdef Py_GIL_DISABLED
    #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 1
  #else
    #define CYTHON_COMPILING_IN_CPYTHON_FREETHREADING 0
  #endif
  #if PY_VERSION_HEX < 0x030A0000
    #undef CYTHON_USE_TYPE_SLOTS
    #define CYTHON_USE_TYPE_SLOTS 1
  #elif !defined(CYTHON_USE_TYPE_SLOTS)
    #define CYTHON_USE_TYPE_SLOTS 1
  #endif
  #ifndef CYTHON_USE_TYPE_SPECS
    #define CYTHON_USE_TYPE_SPECS 0
  #endif
  #ifndef CYTHON_USE_PYTYPE_LOOKUP
    #define CYTHON_USE_PYTYPE_LOOKUP 1
  #endif
  #ifndef CYTHON_USE_PYLONG_INTERNALS
    #define CYTHON_USE_PYLONG_INTERNALS 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_USE_PYLIST_INTERNALS
    #define CYTHON_USE_PYLIST_INTERNALS 0
  #elif !defined(CYTHON_USE_PYLIST_INTERNALS)
    #define CYTHON_USE_PYLIST_INTERNALS 1
  #endif
  #ifndef CYTHON_USE_UNICODE_INTERNALS
    #define CYTHON_USE_UNICODE_INTERNALS 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING || PY_VERSION_HEX >= 0x030B00A2
    #undef CYTHON_USE_UNICODE_WRITER
    #define CYTHON_USE_UNICODE_WRITER 0
  #elif !defined(CYTHON_USE_UNICODE_WRITER)
    #define CYTHON_USE_UNICODE_WRITER 1
  #endif
  #ifndef CYTHON_AVOID_BORROWED_REFS
    #define CYTHON_AVOID_BORROWED_REFS 0
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
    #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 1
  #elif !defined(CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS)
    #define CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS 0
  #endif
  #ifndef CYTHON_ASSUME_SAFE_MACROS
    #define CYTHON_ASSUME_SAFE_MACROS 1
  #endif
  #ifndef CYTHON_ASSUME_SAFE_SIZE
    #define CYTHON_ASSUME_SAFE_SIZE 1
  #endif
  #ifndef CYTHON_UNPACK_METHODS
    #define CYTHON_UNPACK_METHODS 1
  #endif
  #ifndef CYTHON_FAST_THREAD_STATE
    #define CYTHON_FAST_THREAD_STATE 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_FAST_GIL
    #define CYTHON_FAST_GIL 0
  #elif !defined(CYTHON_FAST_GIL)
    #define CYTHON_FAST_GIL (PY_VERSION_HEX < 0x030C00A6)
  #endif
  #ifndef CYTHON_METH_FASTCALL
    #define CYTHON_METH_FASTCALL 1
  #endif
  #ifndef CYTHON_FAST_PYCALL
    #define CYTHON_FAST_PYCALL 1
  #endif
  #ifndef CYTHON_PEP487_INIT_SUBCLASS
    #define CYTHON_PEP487_INIT_SUBCLASS 1
  #endif
  #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
    #define CYTHON_PEP489_MULTI_PHASE_INIT 1
  #endif
  #ifndef CYTHON_USE_MODULE_STATE
    #define CYTHON_USE_MODULE_STATE 0
  #endif
  #ifndef CYTHON_USE_SYS_MONITORING
    #define CYTHON_USE_SYS_MONITORING (PY_VERSION_HEX >= 0x030d00B1)
  #endif
  #ifndef CYTHON_USE_TP_FINALIZE
    #define CYTHON_USE_TP_FINALIZE 1
  #endif
  #ifndef CYTHON_USE_AM_SEND
    #define CYTHON_USE_AM_SEND 1
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    #undef CYTHON_USE_DICT_VERSIONS
    #define CYTHON_USE_DICT_VERSIONS 0
  #elif !defined(CYTHON_USE_DICT_VERSIONS)
    #define CYTHON_USE_DICT_VERSIONS  (PY_VERSION_HEX < 0x030C00A5 && !CYTHON_USE_MODULE_STATE)
  #endif
  #ifndef CYTHON_USE_EXC_INFO_STACK
    #define CYTHON_USE_EXC_INFO_STACK 1
  #endif
  #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC
    #define CYTHON_UPDATE_DESCRIPTOR_DOC 1
  #endif
  #ifndef CYTHON_USE_FREELISTS
    #define CYTHON_USE_FREELISTS (!CYTHON_COMPILING_IN_CPYTHON_FREETHREADING)
  #endif
  #if defined(CYTHON_IMMORTAL_CONSTANTS) && PY_VERSION_HEX < 0x030C0000
    #undef CYTHON_IMMORTAL_CONSTANTS
    #define CYTHON_IMMORTAL_CONSTANTS 0  // definitely won't work
  #elif !defined(CYTHON_IMMORTAL_CONSTANTS)
    #define CYTHON_IMMORTAL_CONSTANTS (PY_VERSION_HEX >= 0x030C0000 && !CYTHON_USE_MODULE_STATE && CYTHON_COMPILING_IN_CPYTHON_FREETHREADING)
  #endif
#endif
#ifndef CYTHON_COMPRESS_STRINGS
  #define CYTHON_COMPRESS_STRINGS 1
#endif
#ifndef CYTHON_FAST_PYCCALL
#define CYTHON_FAST_PYCCALL  CYTHON_FAST_PYCALL
#endif
#ifndef CYTHON_VECTORCALL
#if CYTHON_COMPILING_IN_LIMITED_API
#define CYTHON_VECTORCALL  (__PYX_LIMITED_VERSION_HEX >= 0x030C0000)
#else
#define CYTHON_VECTORCALL  (CYTHON_FAST_PYCCALL)
#endif
#endif
#if CYTHON_USE_PYLONG_INTERNALS
  #undef SHIFT
  #undef BASE
  #undef MASK
  #ifdef SIZEOF_VOID_P
    enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
  #endif
#endif
#ifndef __has_attribute
  #define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
  #define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
  #if defined(__GNUC__)
    #define CYTHON_RESTRICT __restrict__
  #elif defined(_MSC_VER) && _MSC_VER >= 1400
    #define CYTHON_RESTRICT __restrict
  #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define CYTHON_RESTRICT restrict
  #else
    #define CYTHON_RESTRICT
  #endif
#endif
#ifndef CYTHON_UNUSED
  #if defined(__cplusplus)
    /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17
     * but leads to warnings with -pedantic, since it is a C++17 feature */
    #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
      #if __has_cpp_attribute(maybe_unused)
        #define CYTHON_UNUSED [[maybe_unused]]
      #endif
    #endif
  #endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
#   if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
#     define CYTHON_UNUSED __attribute__ ((__unused__))
#   else
#     define CYTHON_UNUSED
#   endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
#   define CYTHON_UNUSED __attribute__ ((__unused__))
# else
#   define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_UNUSED_VAR
#  if defined(__cplusplus)
     template<class T> void CYTHON_UNUSED_VAR( const T& ) { }
#  else
#    define CYTHON_UNUSED_VAR(x) (void)(x)
#  endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
  #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x)
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON && !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
#  define CYTHON_NCP_UNUSED
# else
#  define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_USE_CPP_STD_MOVE
  #if defined(__cplusplus) && (\
    __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600))
    #define CYTHON_USE_CPP_STD_MOVE 1
  #else
    #define CYTHON_USE_CPP_STD_MOVE 0
  #endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#include <stdint.h>
typedef uintptr_t  __pyx_uintptr_t;
#ifndef CYTHON_FALLTHROUGH
  #if defined(__cplusplus)
    /* for clang __has_cpp_attribute(fallthrough) is true even before C++17
     * but leads to warnings with -pedantic, since it is a C++17 feature */
    #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L)
      #if __has_cpp_attribute(fallthrough)
        #define CYTHON_FALLTHROUGH [[fallthrough]]
      #endif
    #endif
    #ifndef CYTHON_FALLTHROUGH
      #if __has_cpp_attribute(clang::fallthrough)
        #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
      #elif __has_cpp_attribute(gnu::fallthrough)
        #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
      #endif
    #endif
  #endif
  #ifndef CYTHON_FALLTHROUGH
    #if __has_attribute(fallthrough)
      #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
    #else
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
  #if defined(__clang__) && defined(__apple_build_version__)
    #if __apple_build_version__ < 7000000
      #undef  CYTHON_FALLTHROUGH
      #define CYTHON_FALLTHROUGH
    #endif
  #endif
#endif
#ifndef Py_UNREACHABLE
  #define Py_UNREACHABLE()  assert(0); abort()
#endif
#ifdef __cplusplus
  /* Compile-time check whether an integer type is unsigned.
   * C++ path: T(0) < T(-1) is true only when -1 wraps to the type's max,
   * i.e. when T is unsigned. Done via a template so it works for any T. */
  template <typename T>
  struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);};
  #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL<type>::value)
#else
  /* C path: same wrap-around trick expressed directly with a cast. */
  #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0)
#endif
#if CYTHON_COMPILING_IN_PYPY == 1
  #define __PYX_NEED_TP_PRINT_SLOT  (PY_VERSION_HEX < 0x030A0000)
#else
  #define __PYX_NEED_TP_PRINT_SLOT  (PY_VERSION_HEX < 0x03090000)
#endif
#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer))

/* CppInitCode */
#ifndef __cplusplus
  #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
#ifndef CYTHON_INLINE
  #if defined(__clang__)
    #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
  #else
    #define CYTHON_INLINE inline
  #endif
#endif
/* Explicitly invoke the destructor of `obj` without releasing its storage
   (for objects placement-constructed into memory owned elsewhere). */
template<typename T>
void __Pyx_call_destructor(T& obj) {
    obj.~T();
}
/* A pointer wrapped to behave like a C++ reference, but default-constructible
 * and rebindable. Stores a non-owning pointer; converts implicitly to T&,
 * and `operator&` yields the wrapped pointer rather than the wrapper's address.
 * NOTE(review): the cross-instantiation comparison operators below read
 * `other.ptr`, which is private in __Pyx_FakeReference<U> for U != T; this
 * presumably relies on same-type use (or -fpermissive) — confirm before
 * using with distinct T/U. */
template<typename T>
class __Pyx_FakeReference {
  public:
    __Pyx_FakeReference() : ptr(NULL) { }
    /* const_cast: allows wrapping a const ref; callers must not mutate
     * through the wrapper in that case. */
    __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
    T *operator->() { return ptr; }
    T *operator&() { return ptr; }
    operator T&() { return *ptr; }
    /* Comparisons forward to the referenced value, not the pointer. */
    template<typename U> bool operator ==(const U& other) const { return *ptr == other; }
    template<typename U> bool operator !=(const U& other) const { return *ptr != other; }
    template<typename U> bool operator==(const __Pyx_FakeReference<U>& other) const { return *ptr == *other.ptr; }
    template<typename U> bool operator!=(const __Pyx_FakeReference<U>& other) const { return *ptr != *other.ptr; }
  private:
    T *ptr;
};

/* PythonCompatibility */
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_DefaultClassType PyType_Type
#if CYTHON_COMPILING_IN_LIMITED_API
    #ifndef CO_OPTIMIZED
    static int CO_OPTIMIZED;
    #endif
    #ifndef CO_NEWLOCALS
    static int CO_NEWLOCALS;
    #endif
    #ifndef CO_VARARGS
    static int CO_VARARGS;
    #endif
    #ifndef CO_VARKEYWORDS
    static int CO_VARKEYWORDS;
    #endif
    #ifndef CO_ASYNC_GENERATOR
    static int CO_ASYNC_GENERATOR;
    #endif
    #ifndef CO_GENERATOR
    static int CO_GENERATOR;
    #endif
    #ifndef CO_COROUTINE
    static int CO_COROUTINE;
    #endif
#else
    #ifndef CO_COROUTINE
      #define CO_COROUTINE 0x80
    #endif
    #ifndef CO_ASYNC_GENERATOR
      #define CO_ASYNC_GENERATOR 0x200
    #endif
#endif
static int __Pyx_init_co_variables(void);
#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE)
  #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type)
#else
  #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type))
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is)
  #define __Pyx_Py_Is(x, y)  Py_Is(x, y)
#else
  #define __Pyx_Py_Is(x, y) ((x) == (y))
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone)
  #define __Pyx_Py_IsNone(ob) Py_IsNone(ob)
#else
  #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None)
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue)
  #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob)
#else
  #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True)
#endif
#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse)
  #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob)
#else
  #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False)
#endif
#define __Pyx_NoneAsNull(obj)  (__Pyx_Py_IsNone(obj) ? NULL : (obj))
#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY
  #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o)
#else
  #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o)
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
  #define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
  #define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
  #define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
  #define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef Py_TPFLAGS_SEQUENCE
  #define Py_TPFLAGS_SEQUENCE 0
#endif
#ifndef Py_TPFLAGS_MAPPING
  #define Py_TPFLAGS_MAPPING 0
#endif
#ifndef Py_TPFLAGS_IMMUTABLETYPE
  #define Py_TPFLAGS_IMMUTABLETYPE (1UL << 8)
#endif
#ifndef Py_TPFLAGS_DISALLOW_INSTANTIATION
  #define Py_TPFLAGS_DISALLOW_INSTANTIATION (1UL << 7)
#endif
#ifndef METH_STACKLESS
  #define METH_STACKLESS 0
#endif
#ifndef METH_FASTCALL
  #ifndef METH_FASTCALL
     #define METH_FASTCALL 0x80
  #endif
  typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
  typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
                                                          Py_ssize_t nargs, PyObject *kwnames);
#else
  #if PY_VERSION_HEX >= 0x030d00A4
  #  define __Pyx_PyCFunctionFast PyCFunctionFast
  #  define __Pyx_PyCFunctionFastWithKeywords PyCFunctionFastWithKeywords
  #else
  #  define __Pyx_PyCFunctionFast _PyCFunctionFast
  #  define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
  #endif
#endif
#if CYTHON_METH_FASTCALL
  #define __Pyx_METH_FASTCALL METH_FASTCALL
  #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast
  #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords
#else
  #define __Pyx_METH_FASTCALL METH_VARARGS
  #define __Pyx_PyCFunction_FastCall PyCFunction
  #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords
#endif
#if CYTHON_VECTORCALL
  #define __pyx_vectorcallfunc vectorcallfunc
  #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET  PY_VECTORCALL_ARGUMENTS_OFFSET
  #define __Pyx_PyVectorcall_NARGS(n)  PyVectorcall_NARGS((size_t)(n))
#else
  #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET  0
  #define __Pyx_PyVectorcall_NARGS(n)  ((Py_ssize_t)(n))
#endif
#if PY_VERSION_HEX >= 0x030900B1
#define __Pyx_PyCFunction_CheckExact(func)  PyCFunction_CheckExact(func)
#else
#define __Pyx_PyCFunction_CheckExact(func)  PyCFunction_Check(func)
#endif
#define __Pyx_CyOrPyCFunction_Check(func)  PyCFunction_Check(func)
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func)  (((PyCFunctionObject*)(func))->m_ml->ml_meth)
#elif !CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_CyOrPyCFunction_GET_FUNCTION(func)  PyCFunction_GET_FUNCTION(func)
#endif
#if CYTHON_COMPILING_IN_CPYTHON
/* Direct struct access to a C function object's method-definition flags. */
#define __Pyx_CyOrPyCFunction_GET_FLAGS(func)  (((PyCFunctionObject*)(func))->m_ml->ml_flags)
/* Return the function object's m_self, or NULL when METH_STATIC is set
 * (static methods carry no meaningful self). */
static CYTHON_INLINE PyObject* __Pyx_CyOrPyCFunction_GET_SELF(PyObject *func) {
    return (__Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_STATIC) ? NULL : ((PyCFunctionObject*)func)->m_self;
}
#endif
/* Return non-zero iff `func` is a builtin C function object wrapping
 * exactly the C entry point `cfunc`. */
static CYTHON_INLINE int __Pyx__IsSameCFunction(PyObject *func, void (*cfunc)(void)) {
#if CYTHON_COMPILING_IN_LIMITED_API
    /* Limited API exposes only the function-call accessor. */
    return PyCFunction_Check(func) && PyCFunction_GetFunction(func) == (PyCFunction) cfunc;
#else
    /* Full API: use the macro accessor form instead. */
    return PyCFunction_Check(func) && PyCFunction_GET_FUNCTION(func) == (PyCFunction) cfunc;
#endif
}
#define __Pyx_IsSameCFunction(func, cfunc)   __Pyx__IsSameCFunction(func, cfunc)
#if PY_VERSION_HEX < 0x03090000 || (CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000)
  #define __Pyx_PyType_FromModuleAndSpec(m, s, b)  ((void)m, PyType_FromSpecWithBases(s, b))
  typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *);
#else
  #define __Pyx_PyType_FromModuleAndSpec(m, s, b)  PyType_FromModuleAndSpec(m, s, b)
  #define __Pyx_PyCMethod  PyCMethod
#endif
#ifndef METH_METHOD
  #define METH_METHOD 0x200
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
  #define PyObject_Malloc(s)   PyMem_Malloc(s)
  #define PyObject_Free(p)     PyMem_Free(p)
  #define PyObject_Realloc(p)  PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)
#elif CYTHON_COMPILING_IN_GRAAL && defined(GRAALPY_VERSION_NUM) && GRAALPY_VERSION_NUM > 0x19000000
  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) GraalPyFrame_SetLineNumber((frame), (lineno))
#elif CYTHON_COMPILING_IN_GRAAL
  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno) _PyFrame_SetLineNumber((frame), (lineno))
#else
  #define __Pyx_PyCode_HasFreeVars(co)  (PyCode_GetNumFree(co) > 0)
  #define __Pyx_PyFrame_SetLineNumber(frame, lineno)  (frame)->f_lineno = (lineno)
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_PyThreadState_Current PyThreadState_Get()
#elif !CYTHON_FAST_THREAD_STATE
  #define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x030d00A1
  #define __Pyx_PyThreadState_Current PyThreadState_GetUnchecked()
#else
  #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#endif
#if CYTHON_USE_MODULE_STATE
/* Fetch the per-module state attached to module object `op`.
   A missing state block is unrecoverable here, so abort via Py_FatalError
   rather than returning NULL to callers that cannot handle it. */
static CYTHON_INLINE void *__Pyx__PyModule_GetState(PyObject *op)
{
    void *state = PyModule_GetState(op);
    if (state == NULL) {
        Py_FatalError("Couldn't find the module state");
    }
    return state;
}
#define __Pyx_PyModule_GetState(o) (__pyx_mstatetype *)__Pyx__PyModule_GetState(o)
#else
#define __Pyx_PyModule_GetState(op) ((void)op,__pyx_mstate_global)
#endif
#define __Pyx_PyObject_GetSlot(obj, name, func_ctype)  __Pyx_PyType_GetSlot(Py_TYPE((PyObject *) obj), name, func_ctype)
#define __Pyx_PyObject_TryGetSlot(obj, name, func_ctype) __Pyx_PyType_TryGetSlot(Py_TYPE(obj), name, func_ctype)
#define __Pyx_PyObject_GetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_GetSubSlot(Py_TYPE(obj), sub, name, func_ctype)
#define __Pyx_PyObject_TryGetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_TryGetSubSlot(Py_TYPE(obj), sub, name, func_ctype)
#if CYTHON_USE_TYPE_SLOTS
  #define __Pyx_PyType_GetSlot(type, name, func_ctype)  ((type)->name)
  #define __Pyx_PyType_TryGetSlot(type, name, func_ctype) __Pyx_PyType_GetSlot(type, name, func_ctype)
  #define __Pyx_PyType_GetSubSlot(type, sub, name, func_ctype) (((type)->sub) ? ((type)->sub->name) : NULL)
  #define __Pyx_PyType_TryGetSubSlot(type, sub, name, func_ctype) __Pyx_PyType_GetSubSlot(type, sub, name, func_ctype)
#else
  #define __Pyx_PyType_GetSlot(type, name, func_ctype)  ((func_ctype) PyType_GetSlot((type), Py_##name))
  #define __Pyx_PyType_TryGetSlot(type, name, func_ctype)\
    ((__PYX_LIMITED_VERSION_HEX >= 0x030A0000 ||\
     (PyType_GetFlags(type) & Py_TPFLAGS_HEAPTYPE) || __Pyx_get_runtime_version() >= 0x030A0000) ?\
     __Pyx_PyType_GetSlot(type, name, func_ctype) : NULL)
  #define __Pyx_PyType_GetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_GetSlot(obj, name, func_ctype)
  #define __Pyx_PyType_TryGetSubSlot(obj, sub, name, func_ctype) __Pyx_PyType_TryGetSlot(obj, name, func_ctype)
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n)  ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n)  PyDict_New()
#endif
#define __Pyx_PyNumber_Divide(x,y)         PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y)  PyNumber_InPlaceTrueDivide(x,y)
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStrWithError(dict, name)  _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
/* Dict lookup that swallows failure: returns the value for `name`, or NULL
 * with the error indicator cleared when the key is absent or the lookup
 * raised. Result follows PyDict_GetItem semantics (presumably a borrowed
 * reference — matches the _PyDict_GetItem_KnownHash call above). */
static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) {
    PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name);
    if (res == NULL) PyErr_Clear();
    return res;
}
#elif !CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000
#define __Pyx_PyDict_GetItemStrWithError  PyDict_GetItemWithError
#define __Pyx_PyDict_GetItemStr           PyDict_GetItem
#else
/* Fallback dict lookup for very old interpreters lacking
 * PyDict_GetItemWithError. Requires `name` to be an interned-style string
 * with a precomputed hash. */
static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) {
#if CYTHON_COMPILING_IN_PYPY
    /* PyPy: delegate to the plain accessor. */
    return PyDict_GetItem(dict, name);
#else
    /* Legacy CPython path: call the dict's internal lookup slot directly,
     * reusing the string's cached hash (ob_shash). NOTE(review): relies on
     * pre-3.x internals (PyDictEntry, ma_lookup) — only reachable on the
     * ancient-interpreter branch guarded above. */
    PyDictEntry *ep;
    PyDictObject *mp = (PyDictObject*) dict;
    long hash = ((PyStringObject *) name)->ob_shash;
    assert(hash != -1);
    ep = (mp->ma_lookup)(mp, name, hash);
    if (ep == NULL) {
        return NULL;
    }
    return ep->me_value;
#endif
}
#define __Pyx_PyDict_GetItemStr           PyDict_GetItem
#endif
#if CYTHON_USE_TYPE_SLOTS
  #define __Pyx_PyType_GetFlags(tp)   (((PyTypeObject *)tp)->tp_flags)
  #define __Pyx_PyType_HasFeature(type, feature)  ((__Pyx_PyType_GetFlags(type) & (feature)) != 0)
#else
  #define __Pyx_PyType_GetFlags(tp)   (PyType_GetFlags((PyTypeObject *)tp))
  #define __Pyx_PyType_HasFeature(type, feature)  PyType_HasFeature(type, feature)
#endif
#define __Pyx_PyObject_GetIterNextFunc(iterator)  __Pyx_PyObject_GetSlot(iterator, tp_iternext, iternextfunc)
#if CYTHON_USE_TYPE_SPECS
#define __Pyx_PyHeapTypeObject_GC_Del(obj)  {\
    PyTypeObject *type = Py_TYPE((PyObject*)obj);\
    assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\
    PyObject_GC_Del(obj);\
    Py_DECREF(type);\
}
#else
#define __Pyx_PyHeapTypeObject_GC_Del(obj)  PyObject_GC_Del(obj)
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
  #define __Pyx_PyUnicode_READY(op)       (0)
  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i)
  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   ((void)u, 1114111U)
  #define __Pyx_PyUnicode_KIND(u)         ((void)u, (0))
  #define __Pyx_PyUnicode_DATA(u)         ((void*)u)
  #define __Pyx_PyUnicode_READ(k, d, i)   ((void)k, PyUnicode_ReadChar((PyObject*)(d), i))
  #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GetLength(u))
#else
  #if PY_VERSION_HEX >= 0x030C0000
    #define __Pyx_PyUnicode_READY(op)       (0)
  #else
    #define __Pyx_PyUnicode_READY(op)       (likely(PyUnicode_IS_READY(op)) ?\
                                                0 : _PyUnicode_Ready((PyObject *)(op)))
  #endif
  #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
  #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u)   PyUnicode_MAX_CHAR_VALUE(u)
  #define __Pyx_PyUnicode_KIND(u)         ((int)PyUnicode_KIND(u))
  #define __Pyx_PyUnicode_DATA(u)         PyUnicode_DATA(u)
  #define __Pyx_PyUnicode_READ(k, d, i)   PyUnicode_READ(k, d, i)
  #define __Pyx_PyUnicode_WRITE(k, d, i, ch)  PyUnicode_WRITE(k, d, i, (Py_UCS4) ch)
  #if PY_VERSION_HEX >= 0x030C0000
    #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != PyUnicode_GET_LENGTH(u))
  #else
    #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
    #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
    #else
    #define __Pyx_PyUnicode_IS_TRUE(u)      (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
    #endif
  #endif
#endif
#if CYTHON_COMPILING_IN_PYPY
  #define __Pyx_PyUnicode_Concat(a, b)      PyNumber_Add(a, b)
  #define __Pyx_PyUnicode_ConcatSafe(a, b)  PyNumber_Add(a, b)
#else
  #define __Pyx_PyUnicode_Concat(a, b)      PyUnicode_Concat(a, b)
  #define __Pyx_PyUnicode_ConcatSafe(a, b)  ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
      PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY
  #if !defined(PyUnicode_DecodeUnicodeEscape)
    #define PyUnicode_DecodeUnicodeEscape(s, size, errors)  PyUnicode_Decode(s, size, "unicode_escape", errors)
  #endif
  #if !defined(PyUnicode_Contains)
    #define PyUnicode_Contains(u, s)  PySequence_Contains(u, s)
  #endif
  #if !defined(PyByteArray_Check)
    #define PyByteArray_Check(obj)  PyObject_TypeCheck(obj, &PyByteArray_Type)
  #endif
  #if !defined(PyObject_Format)
    #define PyObject_Format(obj, fmt)  PyObject_CallMethod(obj, "__format__", "O", fmt)
  #endif
#endif
#define __Pyx_PyUnicode_FormatSafe(a, b)  ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030E0000
  #define __Pyx_PySequence_ListKeepNew(obj)\
    (likely(PyList_CheckExact(obj) && PyUnstable_Object_IsUniquelyReferenced(obj)) ? __Pyx_NewRef(obj) : PySequence_List(obj))
#elif CYTHON_COMPILING_IN_CPYTHON
  #define __Pyx_PySequence_ListKeepNew(obj)\
    (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj))
#else
  #define __Pyx_PySequence_ListKeepNew(obj)  PySequence_List(obj)
#endif
#ifndef PySet_CheckExact
  #define PySet_CheckExact(obj)        __Pyx_IS_TYPE(obj, &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
  #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
  #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
  #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
  #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
/* Describes how a PyObject reference reached the current code, so that
   __Pyx_IS_UNIQUELY_REFERENCED (below) can decide whether it is safe to
   treat the object as unshared (relevant for free-threaded CPython). */
enum __Pyx_ReferenceSharing {
  __Pyx_ReferenceSharing_DefinitelyUnique, // We created it so we know it's unshared - no need to check
  __Pyx_ReferenceSharing_OwnStrongReference, // We hold our own strong reference; uniqueness must be checked at runtime
  __Pyx_ReferenceSharing_FunctionArgument, // Received as a function argument; may be a uniquely referenced temporary
  __Pyx_ReferenceSharing_SharedReference, // Never trust it to be unshared because it's a global or similar
};
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && PY_VERSION_HEX >= 0x030E0000
#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing)\
    (sharing == __Pyx_ReferenceSharing_DefinitelyUnique ? 1 :\
      (sharing == __Pyx_ReferenceSharing_FunctionArgument ? PyUnstable_Object_IsUniqueReferencedTemporary(o) :\
      (sharing == __Pyx_ReferenceSharing_OwnStrongReference ? PyUnstable_Object_IsUniquelyReferenced(o) : 0)))
#elif (CYTHON_COMPILING_IN_CPYTHON && !CYTHON_COMPILING_IN_CPYTHON_FREETHREADING) || CYTHON_COMPILING_IN_LIMITED_API
#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing) (((void)sharing), Py_REFCNT(o) == 1)
#else
#define __Pyx_IS_UNIQUELY_REFERENCED(o, sharing) (((void)o), ((void)sharing), 0)
#endif
#if CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
  #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    #define __Pyx_PyList_GetItemRef(o, i) PyList_GetItemRef(o, i)
  #elif CYTHON_COMPILING_IN_LIMITED_API || !CYTHON_ASSUME_SAFE_MACROS
    #define __Pyx_PyList_GetItemRef(o, i) (likely((i) >= 0) ? PySequence_GetItem(o, i) : (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
  #else
    #define __Pyx_PyList_GetItemRef(o, i) PySequence_ITEM(o, i)
  #endif
#elif CYTHON_COMPILING_IN_LIMITED_API || !CYTHON_ASSUME_SAFE_MACROS
  #if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    #define __Pyx_PyList_GetItemRef(o, i) PyList_GetItemRef(o, i)
  #else
    #define __Pyx_PyList_GetItemRef(o, i) __Pyx_XNewRef(PyList_GetItem(o, i))
  #endif
#else
  #define __Pyx_PyList_GetItemRef(o, i) __Pyx_NewRef(PyList_GET_ITEM(o, i))
#endif
#if CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS && !CYTHON_COMPILING_IN_LIMITED_API && CYTHON_ASSUME_SAFE_MACROS
  #define __Pyx_PyList_GetItemRefFast(o, i, unsafe_shared) (__Pyx_IS_UNIQUELY_REFERENCED(o, unsafe_shared) ?\
    __Pyx_NewRef(PyList_GET_ITEM(o, i)) : __Pyx_PyList_GetItemRef(o, i))
#else
  #define __Pyx_PyList_GetItemRefFast(o, i, unsafe_shared) __Pyx_PyList_GetItemRef(o, i)
#endif
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
#define __Pyx_PyDict_GetItemRef(dict, key, result) PyDict_GetItemRef(dict, key, result)
#elif CYTHON_AVOID_BORROWED_REFS || CYTHON_AVOID_THREAD_UNSAFE_BORROWED_REFS
static CYTHON_INLINE int __Pyx_PyDict_GetItemRef(PyObject *dict, PyObject *key, PyObject **result) {
  /* Look up 'key' in 'dict', storing a new (owned) reference in *result.
     Returns 1 if found, 0 if missing (error cleared), -1 on any other error.
     This variant goes through PyObject_GetItem to avoid borrowed references. */
  PyObject *value = PyObject_GetItem(dict, key);
  *result = value;
  if (value != NULL)
    return 1;
  /* A KeyError just means "not found"; anything else is a real failure. */
  if (!PyErr_ExceptionMatches(PyExc_KeyError))
    return -1;
  PyErr_Clear();
  return 0;
}
#else
static CYTHON_INLINE int __Pyx_PyDict_GetItemRef(PyObject *dict, PyObject *key, PyObject **result) {
  /* Look up 'key' in 'dict', storing a new (owned) reference in *result.
     Returns 1 if found, 0 if missing, -1 on error (mirrors Py3.13's
     PyDict_GetItemRef).  PyDict_GetItemWithError returns a borrowed
     reference, so a successful lookup must be INCREF'ed before returning. */
  PyObject *found = PyDict_GetItemWithError(dict, key);
  if (found != NULL) {
    Py_INCREF(found);
    *result = found;
    return 1;
  }
  *result = NULL;
  /* NULL without a pending exception simply means the key was absent. */
  return PyErr_Occurred() ? -1 : 0;
}
#endif
#if defined(CYTHON_DEBUG_VISIT_CONST) && CYTHON_DEBUG_VISIT_CONST
  #define __Pyx_VISIT_CONST(obj)  Py_VISIT(obj)
#else
  #define __Pyx_VISIT_CONST(obj)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
  #define __Pyx_PySequence_ITEM(o, i) PySequence_ITEM(o, i)
  #define __Pyx_PySequence_SIZE(seq)  Py_SIZE(seq)
  #define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0))
  #define __Pyx_PyTuple_GET_ITEM(o, i) PyTuple_GET_ITEM(o, i)
  #define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0))
  #define __Pyx_PyList_GET_ITEM(o, i) PyList_GET_ITEM(o, i)
#else
  #define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i)
  #define __Pyx_PySequence_SIZE(seq)  PySequence_Size(seq)
  #define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v)
  #define __Pyx_PyTuple_GET_ITEM(o, i) PyTuple_GetItem(o, i)
  #define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v)
  #define __Pyx_PyList_GET_ITEM(o, i) PyList_GetItem(o, i)
#endif
#if CYTHON_ASSUME_SAFE_SIZE
  #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o)
  #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o)
  #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o)
  #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
  #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o)
  #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GET_LENGTH(o)
#else
  #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_Size(o)
  #define __Pyx_PyList_GET_SIZE(o) PyList_Size(o)
  #define __Pyx_PySet_GET_SIZE(o) PySet_Size(o)
  #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o)
  #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o)
  #define __Pyx_PyUnicode_GET_LENGTH(o) PyUnicode_GetLength(o)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_InternFromString)
  #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#define __Pyx_PyLong_FromHash_t PyLong_FromSsize_t
#define __Pyx_PyLong_AsHash_t   __Pyx_PyIndex_AsSsize_t
#if __PYX_LIMITED_VERSION_HEX >= 0x030A0000
    #define __Pyx_PySendResult PySendResult
#else
    typedef enum {
        PYGEN_RETURN = 0,
        PYGEN_ERROR = -1,
        PYGEN_NEXT = 1,
    } __Pyx_PySendResult;
#endif
#if CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX < 0x030A00A3
  typedef __Pyx_PySendResult (*__Pyx_pyiter_sendfunc)(PyObject *iter, PyObject *value, PyObject **result);
#else
  #define __Pyx_pyiter_sendfunc sendfunc
#endif
#if !CYTHON_USE_AM_SEND
#define __PYX_HAS_PY_AM_SEND 0
#elif __PYX_LIMITED_VERSION_HEX >= 0x030A0000
#define __PYX_HAS_PY_AM_SEND 1
#else
#define __PYX_HAS_PY_AM_SEND 2  // our own backported implementation
#endif
#if __PYX_HAS_PY_AM_SEND < 2
    #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#else
    typedef struct {
        unaryfunc am_await;
        unaryfunc am_aiter;
        unaryfunc am_anext;
        __Pyx_pyiter_sendfunc am_send;
    } __Pyx_PyAsyncMethodsStruct;
    #define __Pyx_SlotTpAsAsync(s) ((PyAsyncMethods*)(s))
#endif
#if CYTHON_USE_AM_SEND && PY_VERSION_HEX < 0x030A00F0
    #define __Pyx_TPFLAGS_HAVE_AM_SEND (1UL << 21)
#else
    #define __Pyx_TPFLAGS_HAVE_AM_SEND (0)
#endif
#if PY_VERSION_HEX >= 0x03090000
#define __Pyx_PyInterpreterState_Get() PyInterpreterState_Get()
#else
#define __Pyx_PyInterpreterState_Get() PyThreadState_Get()->interp
#endif
#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030A0000
#ifdef __cplusplus
extern "C"
#endif
PyAPI_FUNC(void *) PyMem_Calloc(size_t nelem, size_t elsize);
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
static int __Pyx_init_co_variable(PyObject *inspect, const char* name, int *write_to) {
    /* Fetch the integer attribute 'name' from the inspect module and store
       it in *write_to.  Returns 1 on success, 0 on failure (with the Python
       error left set for the caller). */
    PyObject *py_value = PyObject_GetAttrString(inspect, name);
    int value;
    if (py_value == NULL)
        return 0;
    value = (int) PyLong_AsLong(py_value);
    Py_DECREF(py_value);
    *write_to = value;
    /* PyLong_AsLong signals failure with -1 plus a pending exception;
       a genuine value of -1 (no exception) is still a success. */
    if (value == -1 && PyErr_Occurred())
        return 0;
    return 1;
}
static int __Pyx_init_co_variables(void) {
    /* Limited-API builds: the CO_* code-object flag macros may not be
       provided by the headers, so any flag left undefined at compile time
       is read from the "inspect" module at runtime instead.
       Returns 0 on success, -1 on failure (with a Python error set). */
    PyObject *inspect;
    int result;
    inspect = PyImport_ImportModule("inspect");
    /* Fix: PyImport_ImportModule returns NULL on failure; without this
       check the NULL pointer would be passed to PyObject_GetAttrString
       and Py_DECREF below, both of which crash on NULL. */
    if (!inspect) return -1;
    result =
#if !defined(CO_OPTIMIZED)
        __Pyx_init_co_variable(inspect, "CO_OPTIMIZED", &CO_OPTIMIZED) &&
#endif
#if !defined(CO_NEWLOCALS)
        __Pyx_init_co_variable(inspect, "CO_NEWLOCALS", &CO_NEWLOCALS) &&
#endif
#if !defined(CO_VARARGS)
        __Pyx_init_co_variable(inspect, "CO_VARARGS", &CO_VARARGS) &&
#endif
#if !defined(CO_VARKEYWORDS)
        __Pyx_init_co_variable(inspect, "CO_VARKEYWORDS", &CO_VARKEYWORDS) &&
#endif
#if !defined(CO_ASYNC_GENERATOR)
        __Pyx_init_co_variable(inspect, "CO_ASYNC_GENERATOR", &CO_ASYNC_GENERATOR) &&
#endif
#if !defined(CO_GENERATOR)
        __Pyx_init_co_variable(inspect, "CO_GENERATOR", &CO_GENERATOR) &&
#endif
#if !defined(CO_COROUTINE)
        __Pyx_init_co_variable(inspect, "CO_COROUTINE", &CO_COROUTINE) &&
#endif
        1;
    Py_DECREF(inspect);
    return result ? 0 : -1;
}
#else
/* Non-Limited-API builds: the CO_* flags come straight from the CPython
   headers, so there is nothing to initialise at runtime. */
static int __Pyx_init_co_variables(void) {
    return 0;  // It's a limited API-only feature
}
#endif

/* MathInitCode */
#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)
  #ifndef _USE_MATH_DEFINES
    #define _USE_MATH_DEFINES
  #endif
#endif
#include <math.h>
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif

#ifndef CYTHON_CLINE_IN_TRACEBACK_RUNTIME
#define CYTHON_CLINE_IN_TRACEBACK_RUNTIME 0
#endif
#ifndef CYTHON_CLINE_IN_TRACEBACK
#define CYTHON_CLINE_IN_TRACEBACK CYTHON_CLINE_IN_TRACEBACK_RUNTIME
#endif
#if CYTHON_CLINE_IN_TRACEBACK
#define __PYX_MARK_ERR_POS(f_index, lineno)  { __pyx_filename = __pyx_f[f_index]; (void) __pyx_filename; __pyx_lineno = lineno; (void) __pyx_lineno; __pyx_clineno = __LINE__; (void) __pyx_clineno; }
#else
#define __PYX_MARK_ERR_POS(f_index, lineno)  { __pyx_filename = __pyx_f[f_index]; (void) __pyx_filename; __pyx_lineno = lineno; (void) __pyx_lineno; (void) __pyx_clineno; }
#endif
#define __PYX_ERR(f_index, lineno, Ln_error) \
    { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }

#ifdef CYTHON_EXTERN_C
    #undef __PYX_EXTERN_C
    #define __PYX_EXTERN_C CYTHON_EXTERN_C
#elif defined(__PYX_EXTERN_C)
    #ifdef _MSC_VER
    #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.")
    #else
    #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.
    #endif
#else
    #define __PYX_EXTERN_C extern "C++"
#endif

#define __PYX_HAVE__cuda__bindings__cy_nvml
#define __PYX_HAVE_API__cuda__bindings__cy_nvml
/* Early includes */
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */

#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif

#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed)  (\
    (sizeof(type) < sizeof(Py_ssize_t))  ||\
    (sizeof(type) > sizeof(Py_ssize_t) &&\
          likely(v < (type)PY_SSIZE_T_MAX ||\
                 v == (type)PY_SSIZE_T_MAX)  &&\
          (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
                                v == (type)PY_SSIZE_T_MIN)))  ||\
    (sizeof(type) == sizeof(Py_ssize_t) &&\
          (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
                               v == (type)PY_SSIZE_T_MAX)))  )
/* Bounds check: true iff 0 <= i < limit, done with a single comparison.
   Casting to size_t makes a negative i wrap to a huge unsigned value, so
   the one unsigned '<' covers both the lower and the upper bound.
   (NOTE(review): a negative 'limit' would also wrap and accept almost any
   index — callers presumably always pass non-negative limits.) */
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
    return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
    #include <cstdlib>
    #define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
    #define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
    #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
    #define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
    #define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
    #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s);
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char*);
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString        PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if CYTHON_ASSUME_SAFE_MACROS
    #define __Pyx_PyBytes_AsWritableString(s)     ((char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsWritableSString(s)    ((signed char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsWritableUString(s)    ((unsigned char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsString(s)     ((const char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsSString(s)    ((const signed char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyBytes_AsUString(s)    ((const unsigned char*) PyBytes_AS_STRING(s))
    #define __Pyx_PyByteArray_AsString(s) PyByteArray_AS_STRING(s)
#else
    #define __Pyx_PyBytes_AsWritableString(s)     ((char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsWritableSString(s)    ((signed char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsWritableUString(s)    ((unsigned char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsString(s)     ((const char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsSString(s)    ((const signed char*) PyBytes_AsString(s))
    #define __Pyx_PyBytes_AsUString(s)    ((const unsigned char*) PyBytes_AsString(s))
    #define __Pyx_PyByteArray_AsString(s) PyByteArray_AsString(s)
#endif
#define __Pyx_PyObject_AsWritableString(s)    ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s)    ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s)    ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s)    ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s)    ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s)  __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s)   __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s)   __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
#define __Pyx_PyUnicode_FromOrdinal(o)       PyUnicode_FromOrdinal((int)o)
#define __Pyx_PyUnicode_AsUnicode            PyUnicode_AsUnicode
/* Take a new strong reference to 'obj' and return it ('obj' must not be
   NULL).  Uses Py_NewRef() where available (CPython >= 3.10 or wherever
   the macro is defined); otherwise falls back to INCREF-and-return. */
static CYTHON_INLINE PyObject *__Pyx_NewRef(PyObject *obj) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030a0000 || defined(Py_NewRef)
    return Py_NewRef(obj);
#else
    Py_INCREF(obj);
    return obj;
#endif
}
/* NULL-tolerant variant of __Pyx_NewRef: returns NULL unchanged when
   'obj' is NULL, otherwise returns a new strong reference to it. */
static CYTHON_INLINE PyObject *__Pyx_XNewRef(PyObject *obj) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030a0000 || defined(Py_XNewRef)
    return Py_XNewRef(obj);
#else
    Py_XINCREF(obj);
    return obj;
#endif
}
static CYTHON_INLINE PyObject *__Pyx_Owned_Py_None(int b);
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Long(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
    (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyLong_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#define __Pyx_PyFloat_AS_DOUBLE(x) PyFloat_AS_DOUBLE(x)
#else
#define __Pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#define __Pyx_PyFloat_AS_DOUBLE(x) PyFloat_AsDouble(x)
#endif
#define __Pyx_PyFloat_AsFloat(x) ((float) __Pyx_PyFloat_AsDouble(x))
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#if CYTHON_USE_PYLONG_INTERNALS
  #if PY_VERSION_HEX >= 0x030C00A7
  #ifndef _PyLong_SIGN_MASK
    #define _PyLong_SIGN_MASK 3
  #endif
  #ifndef _PyLong_NON_SIZE_BITS
    #define _PyLong_NON_SIZE_BITS 3
  #endif
  #define __Pyx_PyLong_Sign(x)  (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK)
  #define __Pyx_PyLong_IsNeg(x)  ((__Pyx_PyLong_Sign(x) & 2) != 0)
  #define __Pyx_PyLong_IsNonNeg(x)  (!__Pyx_PyLong_IsNeg(x))
  #define __Pyx_PyLong_IsZero(x)  (__Pyx_PyLong_Sign(x) & 1)
  #define __Pyx_PyLong_IsPos(x)  (__Pyx_PyLong_Sign(x) == 0)
  #define __Pyx_PyLong_CompactValueUnsigned(x)  (__Pyx_PyLong_Digits(x)[0])
  #define __Pyx_PyLong_DigitCount(x)  ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS))
  #define __Pyx_PyLong_SignedDigitCount(x)\
        ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x))
  #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue)
    #define __Pyx_PyLong_IsCompact(x)     PyUnstable_Long_IsCompact((PyLongObject*) x)
    #define __Pyx_PyLong_CompactValue(x)  PyUnstable_Long_CompactValue((PyLongObject*) x)
  #else
    #define __Pyx_PyLong_IsCompact(x)     (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS))
    #define __Pyx_PyLong_CompactValue(x)  ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0])
  #endif
  typedef Py_ssize_t  __Pyx_compact_pylong;
  typedef size_t  __Pyx_compact_upylong;
  #else
  #define __Pyx_PyLong_IsNeg(x)  (Py_SIZE(x) < 0)
  #define __Pyx_PyLong_IsNonNeg(x)  (Py_SIZE(x) >= 0)
  #define __Pyx_PyLong_IsZero(x)  (Py_SIZE(x) == 0)
  #define __Pyx_PyLong_IsPos(x)  (Py_SIZE(x) > 0)
  #define __Pyx_PyLong_CompactValueUnsigned(x)  ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0])
  #define __Pyx_PyLong_DigitCount(x)  __Pyx_sst_abs(Py_SIZE(x))
  #define __Pyx_PyLong_SignedDigitCount(x)  Py_SIZE(x)
  #define __Pyx_PyLong_IsCompact(x)  (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1)
  #define __Pyx_PyLong_CompactValue(x)\
        ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? -(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0]))
  typedef sdigit  __Pyx_compact_pylong;
  typedef digit  __Pyx_compact_upylong;
  #endif
  #if PY_VERSION_HEX >= 0x030C00A5
  #define __Pyx_PyLong_Digits(x)  (((PyLongObject*)x)->long_value.ob_digit)
  #else
  #define __Pyx_PyLong_Digits(x)  (((PyLongObject*)x)->ob_digit)
  #endif
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
  #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#elif __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
  #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeASCII(c_str, size, NULL)
#else
  #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#endif


/* Test for GCC > 2.95 */
#if defined(__GNUC__)     && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
  #define likely(x)   __builtin_expect(!!(x), 1)
  #define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
  #define likely(x)   (x)
  #define unlikely(x) (x)
#endif /* __GNUC__ */
/* PretendToInitialize */
#ifdef __cplusplus
#if __cplusplus > 201103L
#include <type_traits>
#endif
/* Default-initialises *ptr — presumably to silence "may be used
   uninitialised" diagnostics on variables Cython knows are assigned
   before use (TODO confirm intent against the Cython utility source).
   Post-C++11 the assignment is restricted to trivially
   default-constructible T, so no real constructor is ever invoked here. */
template <typename T>
static void __Pyx_pretend_to_initialize(T* ptr) {
#if __cplusplus > 201103L
    if ((std::is_trivially_default_constructible<T>::value))
#endif
        *ptr = T();
    (void)ptr;
}
#else
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
#endif


#if !CYTHON_USE_MODULE_STATE
static PyObject *__pyx_m = NULL;
#endif
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * const __pyx_cfilenm = __FILE__;
static const char *__pyx_filename;

/* #### Code section: filename_table ### */

/* Source-file name table, indexed by the 'f_index' argument of
   __PYX_MARK_ERR_POS / __PYX_ERR when recording error positions. */
static const char* const __pyx_f[] = {
  "cuda/bindings/cy_nvml.pyx",
};
/* #### Code section: utility_code_proto_before_types ### */
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
  #define __PYX_FORCE_INIT_THREADS 0
#endif

/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()

/* Atomics.proto (used by CodeObjectCache) */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS
#define __PYX_GET_CYTHON_COMPILING_IN_CPYTHON_FREETHREADING() CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
#define __pyx_atomic_int_type int
#define __pyx_nonatomic_int_type int
#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
                        (__STDC_VERSION__ >= 201112L) &&\
                        !defined(__STDC_NO_ATOMICS__))
    #include <stdatomic.h>
#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
                    (__cplusplus >= 201103L) ||\
                    (defined(_MSC_VER) && _MSC_VER >= 1700)))
    #include <atomic>
#endif
#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
                        (__STDC_VERSION__ >= 201112L) &&\
                        !defined(__STDC_NO_ATOMICS__) &&\
                       ATOMIC_INT_LOCK_FREE == 2)
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type atomic_int
    #define __pyx_atomic_ptr_type atomic_uintptr_t
    #define __pyx_nonatomic_ptr_type uintptr_t
    #define __pyx_atomic_incr_relaxed(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed)
    #define __pyx_atomic_incr_acq_rel(value) atomic_fetch_add_explicit(value, 1, memory_order_acq_rel)
    #define __pyx_atomic_decr_acq_rel(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel)
    #define __pyx_atomic_sub(value, arg) atomic_fetch_sub(value, arg)
    #define __pyx_atomic_int_cmp_exchange(value, expected, desired) atomic_compare_exchange_strong(value, expected, desired)
    #define __pyx_atomic_load(value) atomic_load(value)
    #define __pyx_atomic_store(value, new_value) atomic_store(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) atomic_load_explicit(value, memory_order_relaxed)
    #define __pyx_atomic_pointer_load_acquire(value) atomic_load_explicit(value, memory_order_acquire)
    #define __pyx_atomic_pointer_exchange(value, new_value) atomic_exchange(value, (__pyx_nonatomic_ptr_type)new_value)
    #define __pyx_atomic_pointer_cmp_exchange(value, expected, desired) atomic_compare_exchange_strong(value, expected, desired)
    #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
        #pragma message ("Using standard C atomics")
    #elif defined(__PYX_DEBUG_ATOMICS)
        #warning "Using standard C atomics"
    #endif
#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
                    (__cplusplus >= 201103L) ||\
\
                    (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\
                    ATOMIC_INT_LOCK_FREE == 2)
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type std::atomic_int
    #define __pyx_atomic_ptr_type std::atomic_uintptr_t
    #define __pyx_nonatomic_ptr_type uintptr_t
    #define __pyx_atomic_incr_relaxed(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed)
    #define __pyx_atomic_incr_acq_rel(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_acq_rel)
    #define __pyx_atomic_decr_acq_rel(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel)
    #define __pyx_atomic_sub(value, arg) std::atomic_fetch_sub(value, arg)
    #define __pyx_atomic_int_cmp_exchange(value, expected, desired) std::atomic_compare_exchange_strong(value, expected, desired)
    #define __pyx_atomic_load(value) std::atomic_load(value)
    #define __pyx_atomic_store(value, new_value) std::atomic_store(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) std::atomic_load_explicit(value, std::memory_order_relaxed)
    #define __pyx_atomic_pointer_load_acquire(value) std::atomic_load_explicit(value, std::memory_order_acquire)
    #define __pyx_atomic_pointer_exchange(value, new_value) std::atomic_exchange(value, (__pyx_nonatomic_ptr_type)new_value)
    #define __pyx_atomic_pointer_cmp_exchange(value, expected, desired) std::atomic_compare_exchange_strong(value, expected, desired)
    #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
        #pragma message ("Using standard C++ atomics")
    #elif defined(__PYX_DEBUG_ATOMICS)
        #warning "Using standard C++ atomics"
    #endif
#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\
                    (__GNUC_MINOR__ > 1 ||\
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))))
    #define __pyx_atomic_ptr_type void*
    #define __pyx_nonatomic_ptr_type void*
    #define __pyx_atomic_incr_relaxed(value) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_incr_acq_rel(value) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_acq_rel(value) __sync_fetch_and_sub(value, 1)
    #define __pyx_atomic_sub(value, arg) __sync_fetch_and_sub(value, arg)
    static CYTHON_INLINE int __pyx_atomic_int_cmp_exchange(__pyx_atomic_int_type* value, __pyx_nonatomic_int_type* expected, __pyx_nonatomic_int_type desired) {
        /* Strong compare-and-swap built on __sync_val_compare_and_swap,
           mirroring the C11 atomic_compare_exchange_strong contract:
           returns 1 when the swap happened, otherwise 0 with the value
           actually observed written back through *expected. */
        __pyx_nonatomic_int_type observed = __sync_val_compare_and_swap(value, *expected, desired);
        int swapped = (observed == *expected);
        *expected = observed;
        return swapped;
    }
    #define __pyx_atomic_load(value) __sync_fetch_and_add(value, 0)
    #define __pyx_atomic_store(value, new_value) __sync_lock_test_and_set(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) __sync_fetch_and_add(value, 0)
    #define __pyx_atomic_pointer_load_acquire(value) __sync_fetch_and_add(value, 0)
    #define __pyx_atomic_pointer_exchange(value, new_value) __sync_lock_test_and_set(value, (__pyx_atomic_ptr_type)new_value)
    static CYTHON_INLINE int __pyx_atomic_pointer_cmp_exchange(__pyx_atomic_ptr_type* value, __pyx_nonatomic_ptr_type* expected, __pyx_nonatomic_ptr_type desired) {
        /* Pointer-width strong CAS via __sync_val_compare_and_swap:
           returns 1 on a successful swap, else 0 with the observed
           pointer stored back into *expected. */
        __pyx_nonatomic_ptr_type observed = __sync_val_compare_and_swap(value, *expected, desired);
        int swapped = (observed == *expected);
        *expected = observed;
        return swapped;
    }
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER)
    #include <intrin.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type long
    #define __pyx_atomic_ptr_type void*
    #undef __pyx_nonatomic_int_type
    #define __pyx_nonatomic_int_type long
    #define __pyx_nonatomic_ptr_type void*
    #pragma intrinsic (_InterlockedExchangeAdd, _InterlockedExchange, _InterlockedCompareExchange, _InterlockedCompareExchangePointer, _InterlockedExchangePointer)
    #define __pyx_atomic_incr_relaxed(value) _InterlockedExchangeAdd(value, 1)
    #define __pyx_atomic_incr_acq_rel(value) _InterlockedExchangeAdd(value, 1)
    #define __pyx_atomic_decr_acq_rel(value) _InterlockedExchangeAdd(value, -1)
    #define __pyx_atomic_sub(value, arg) _InterlockedExchangeAdd(value, -arg)
    static CYTHON_INLINE int __pyx_atomic_int_cmp_exchange(__pyx_atomic_int_type* value, __pyx_nonatomic_int_type* expected, __pyx_nonatomic_int_type desired) {
        /* MSVC strong CAS via _InterlockedCompareExchange (note its
           argument order: destination, exchange, comparand).  Returns 1
           on success; on failure stores the observed value in *expected,
           matching the atomic_compare_exchange_strong contract. */
        __pyx_nonatomic_int_type observed = _InterlockedCompareExchange(value, desired, *expected);
        int swapped = (observed == *expected);
        *expected = observed;
        return swapped;
    }
    #define __pyx_atomic_load(value) _InterlockedExchangeAdd(value, 0)
    #define __pyx_atomic_store(value, new_value) _InterlockedExchange(value, new_value)
    #define __pyx_atomic_pointer_load_relaxed(value) *(void * volatile *)value
    #define __pyx_atomic_pointer_load_acquire(value) _InterlockedCompareExchangePointer(value, 0, 0)
    #define __pyx_atomic_pointer_exchange(value, new_value) _InterlockedExchangePointer(value, (__pyx_atomic_ptr_type)new_value)
    static CYTHON_INLINE int __pyx_atomic_pointer_cmp_exchange(__pyx_atomic_ptr_type* value, __pyx_nonatomic_ptr_type* expected, __pyx_nonatomic_ptr_type desired) {
        /* MSVC pointer-width strong CAS via
           _InterlockedCompareExchangePointer (destination, exchange,
           comparand).  Returns 1 on success; on failure writes the
           observed pointer back through *expected. */
        __pyx_atomic_ptr_type observed = _InterlockedCompareExchangePointer(value, desired, *expected);
        int swapped = (observed == *expected);
        *expected = observed;
        return swapped;
    }
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif

/* #### Code section: numeric_typedefs ### */

/* "cuda/bindings/cy_nvml.pxd":746
 *     unsigned char moduleId
 * 
 * ctypedef unsigned int nvmlDeviceArchitecture_t 'nvmlDeviceArchitecture_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
*/
typedef unsigned int nvmlDeviceArchitecture_t;  /* device-architecture id; mirrors nvml.h's nvmlDeviceArchitecture_t */

/* "cuda/bindings/cy_nvml.pxd":747
 * 
 * ctypedef unsigned int nvmlDeviceArchitecture_t 'nvmlDeviceArchitecture_t'
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
*/
typedef unsigned int nvmlBusType_t;  /* bus type id; mirrors nvml.h's nvmlBusType_t */

/* "cuda/bindings/cy_nvml.pxd":748
 * ctypedef unsigned int nvmlDeviceArchitecture_t 'nvmlDeviceArchitecture_t'
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
*/
typedef unsigned int nvmlFanControlPolicy_t;  /* fan control policy selector; mirrors nvml.h's nvmlFanControlPolicy_t */

/* "cuda/bindings/cy_nvml.pxd":749
 * ctypedef unsigned int nvmlBusType_t 'nvmlBusType_t'
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
*/
typedef unsigned int nvmlPowerSource_t;  /* power source selector; mirrors nvml.h's nvmlPowerSource_t */

/* "cuda/bindings/cy_nvml.pxd":750
 * ctypedef unsigned int nvmlFanControlPolicy_t 'nvmlFanControlPolicy_t'
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'
*/
typedef unsigned char nvmlPowerScopeType_t;  /* power scope selector; unsigned char per nvml.h's nvmlPowerScopeType_t */

/* "cuda/bindings/cy_nvml.pxd":751
 * ctypedef unsigned int nvmlPowerSource_t 'nvmlPowerSource_t'
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'             # <<<<<<<<<<<<<<
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'
 * ctypedef struct nvmlVgpuHeterogeneousMode_v1_t 'nvmlVgpuHeterogeneousMode_v1_t':
*/
typedef unsigned int nvmlVgpuTypeId_t;  /* vGPU type identifier; mirrors nvml.h's nvmlVgpuTypeId_t */

/* "cuda/bindings/cy_nvml.pxd":752
 * ctypedef unsigned char nvmlPowerScopeType_t 'nvmlPowerScopeType_t'
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuHeterogeneousMode_v1_t 'nvmlVgpuHeterogeneousMode_v1_t':
 *     unsigned int version
*/
typedef unsigned int nvmlVgpuInstance_t;  /* vGPU instance identifier; mirrors nvml.h's nvmlVgpuInstance_t */

/* "cuda/bindings/cy_nvml.pxd":791
 *     unsigned long long attackerAdvantage
 * 
 * ctypedef unsigned char nvmlGpuFabricState_t 'nvmlGpuFabricState_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlSystemDriverBranchInfo_v1_t 'nvmlSystemDriverBranchInfo_v1_t':
 *     unsigned int version
*/
typedef unsigned char nvmlGpuFabricState_t;  /* GPU fabric state code; unsigned char per nvml.h's nvmlGpuFabricState_t */

/* "cuda/bindings/cy_nvml.pxd":796
 *     char branch[80]
 * 
 * ctypedef unsigned int nvmlAffinityScope_t 'nvmlAffinityScope_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlTemperature_v1_t 'nvmlTemperature_v1_t':
 *     unsigned int version
*/
typedef unsigned int nvmlAffinityScope_t;  /* affinity scope selector; mirrors nvml.h's nvmlAffinityScope_t */
/* #### Code section: complex_type_declarations ### */
/* #### Code section: type_declarations ### */

/*--- Type declarations ---*/
struct nvmlPciInfoExt_v1_t;
typedef struct nvmlPciInfoExt_v1_t nvmlPciInfoExt_v1_t;
struct nvmlCoolerInfo_v1_t;
typedef struct nvmlCoolerInfo_v1_t nvmlCoolerInfo_v1_t;
struct nvmlDramEncryptionInfo_v1_t;
typedef struct nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_v1_t;
struct nvmlMarginTemperature_v1_t;
typedef struct nvmlMarginTemperature_v1_t nvmlMarginTemperature_v1_t;
struct nvmlClockOffset_v1_t;
typedef struct nvmlClockOffset_v1_t nvmlClockOffset_v1_t;
struct nvmlFanSpeedInfo_v1_t;
typedef struct nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_v1_t;
struct nvmlDevicePerfModes_v1_t;
typedef struct nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_v1_t;
struct nvmlDeviceCurrentClockFreqs_v1_t;
typedef struct nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_v1_t;
struct nvmlEccSramErrorStatus_v1_t;
typedef struct nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_v1_t;
struct nvmlPlatformInfo_v2_t;
typedef struct nvmlPlatformInfo_v2_t nvmlPlatformInfo_v2_t;
struct nvmlVgpuHeterogeneousMode_v1_t;
typedef struct nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_v1_t;
struct nvmlVgpuPlacementId_v1_t;
typedef struct nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_v1_t;
struct nvmlVgpuPlacementList_v2_t;
typedef struct nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_v2_t;
struct nvmlVgpuTypeBar1Info_v1_t;
typedef struct nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_v1_t;
struct nvmlVgpuRuntimeState_v1_t;
typedef struct nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_v1_t;
struct nvmlSystemConfComputeSettings_v1_t;
typedef struct nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_v1_t;
struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t;
typedef struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_v1_t;
struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t;
typedef struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_v1_t;
struct nvmlSystemDriverBranchInfo_v1_t;
typedef struct nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_v1_t;
struct nvmlTemperature_v1_t;
typedef struct nvmlTemperature_v1_t nvmlTemperature_v1_t;
struct nvmlNvlinkSupportedBwModes_v1_t;
typedef struct nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_v1_t;
struct nvmlNvlinkGetBwMode_v1_t;
typedef struct nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_v1_t;
struct nvmlNvlinkSetBwMode_v1_t;
typedef struct nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_v1_t;
struct nvmlDeviceCapabilities_v1_t;
typedef struct nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_v1_t;
struct nvmlPowerSmoothingProfile_v1_t;
typedef struct nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_v1_t;
struct nvmlPowerSmoothingState_v1_t;
typedef struct nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_v1_t;
struct nvmlDeviceAddressingMode_v1_t;
typedef struct nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_v1_t;
struct nvmlRepairStatus_v1_t;
typedef struct nvmlRepairStatus_v1_t nvmlRepairStatus_v1_t;
struct nvmlPdi_v1_t;
typedef struct nvmlPdi_v1_t nvmlPdi_v1_t;
struct nvmlPciInfo_t;
typedef struct nvmlPciInfo_t nvmlPciInfo_t;
struct nvmlEccErrorCounts_t;
typedef struct nvmlEccErrorCounts_t nvmlEccErrorCounts_t;
struct nvmlUtilization_t;
typedef struct nvmlUtilization_t nvmlUtilization_t;
struct nvmlMemory_t;
typedef struct nvmlMemory_t nvmlMemory_t;
struct nvmlMemory_v2_t;
typedef struct nvmlMemory_v2_t nvmlMemory_v2_t;
struct nvmlBAR1Memory_t;
typedef struct nvmlBAR1Memory_t nvmlBAR1Memory_t;
struct nvmlProcessInfo_v1_t;
typedef struct nvmlProcessInfo_v1_t nvmlProcessInfo_v1_t;
struct nvmlProcessInfo_v2_t;
typedef struct nvmlProcessInfo_v2_t nvmlProcessInfo_v2_t;
struct nvmlProcessInfo_t;
typedef struct nvmlProcessInfo_t nvmlProcessInfo_t;
struct nvmlProcessDetail_v1_t;
typedef struct nvmlProcessDetail_v1_t nvmlProcessDetail_v1_t;
struct nvmlDeviceAttributes_t;
typedef struct nvmlDeviceAttributes_t nvmlDeviceAttributes_t;
struct nvmlC2cModeInfo_v1_t;
typedef struct nvmlC2cModeInfo_v1_t nvmlC2cModeInfo_v1_t;
struct nvmlRowRemapperHistogramValues_t;
typedef struct nvmlRowRemapperHistogramValues_t nvmlRowRemapperHistogramValues_t;
struct nvmlNvLinkUtilizationControl_t;
typedef struct nvmlNvLinkUtilizationControl_t nvmlNvLinkUtilizationControl_t;
struct nvmlBridgeChipInfo_t;
typedef struct nvmlBridgeChipInfo_t nvmlBridgeChipInfo_t;
union nvmlValue_t;
typedef union nvmlValue_t nvmlValue_t;
struct nvmlViolationTime_t;
typedef struct nvmlViolationTime_t nvmlViolationTime_t;
struct _anon_pod0;
typedef struct _anon_pod0 _anon_pod0;
union nvmlUUIDValue_t;
typedef union nvmlUUIDValue_t nvmlUUIDValue_t;
struct nvmlClkMonFaultInfo_t;
typedef struct nvmlClkMonFaultInfo_t nvmlClkMonFaultInfo_t;
struct nvmlProcessUtilizationSample_t;
typedef struct nvmlProcessUtilizationSample_t nvmlProcessUtilizationSample_t;
struct nvmlProcessUtilizationInfo_v1_t;
typedef struct nvmlProcessUtilizationInfo_v1_t nvmlProcessUtilizationInfo_v1_t;
struct nvmlPlatformInfo_v1_t;
typedef struct nvmlPlatformInfo_v1_t nvmlPlatformInfo_v1_t;
struct _anon_pod1;
typedef struct _anon_pod1 _anon_pod1;
struct nvmlVgpuPlacementList_v1_t;
typedef struct nvmlVgpuPlacementList_v1_t nvmlVgpuPlacementList_v1_t;
struct _anon_pod2;
typedef struct _anon_pod2 _anon_pod2;
struct _anon_pod3;
typedef struct _anon_pod3 _anon_pod3;
struct nvmlVgpuSchedulerLogEntry_t;
typedef struct nvmlVgpuSchedulerLogEntry_t nvmlVgpuSchedulerLogEntry_t;
struct _anon_pod4;
typedef struct _anon_pod4 _anon_pod4;
struct _anon_pod5;
typedef struct _anon_pod5 _anon_pod5;
struct nvmlVgpuSchedulerCapabilities_t;
typedef struct nvmlVgpuSchedulerCapabilities_t nvmlVgpuSchedulerCapabilities_t;
struct nvmlVgpuLicenseExpiry_t;
typedef struct nvmlVgpuLicenseExpiry_t nvmlVgpuLicenseExpiry_t;
struct nvmlGridLicenseExpiry_t;
typedef struct nvmlGridLicenseExpiry_t nvmlGridLicenseExpiry_t;
struct nvmlNvLinkPowerThres_t;
typedef struct nvmlNvLinkPowerThres_t nvmlNvLinkPowerThres_t;
struct nvmlHwbcEntry_t;
typedef struct nvmlHwbcEntry_t nvmlHwbcEntry_t;
struct nvmlLedState_t;
typedef struct nvmlLedState_t nvmlLedState_t;
struct nvmlUnitInfo_t;
typedef struct nvmlUnitInfo_t nvmlUnitInfo_t;
struct nvmlPSUInfo_t;
typedef struct nvmlPSUInfo_t nvmlPSUInfo_t;
struct nvmlUnitFanInfo_t;
typedef struct nvmlUnitFanInfo_t nvmlUnitFanInfo_t;
struct nvmlSystemEventData_v1_t;
typedef struct nvmlSystemEventData_v1_t nvmlSystemEventData_v1_t;
struct nvmlAccountingStats_t;
typedef struct nvmlAccountingStats_t nvmlAccountingStats_t;
struct nvmlFBCStats_t;
typedef struct nvmlFBCStats_t nvmlFBCStats_t;
struct nvmlConfComputeSystemCaps_t;
typedef struct nvmlConfComputeSystemCaps_t nvmlConfComputeSystemCaps_t;
struct nvmlConfComputeSystemState_t;
typedef struct nvmlConfComputeSystemState_t nvmlConfComputeSystemState_t;
struct nvmlConfComputeMemSizeInfo_t;
typedef struct nvmlConfComputeMemSizeInfo_t nvmlConfComputeMemSizeInfo_t;
struct nvmlConfComputeGpuCertificate_t;
typedef struct nvmlConfComputeGpuCertificate_t nvmlConfComputeGpuCertificate_t;
struct nvmlConfComputeGpuAttestationReport_t;
typedef struct nvmlConfComputeGpuAttestationReport_t nvmlConfComputeGpuAttestationReport_t;
struct nvmlVgpuVersion_t;
typedef struct nvmlVgpuVersion_t nvmlVgpuVersion_t;
struct nvmlVgpuMetadata_t;
typedef struct nvmlVgpuMetadata_t nvmlVgpuMetadata_t;
struct nvmlVgpuPgpuCompatibility_t;
typedef struct nvmlVgpuPgpuCompatibility_t nvmlVgpuPgpuCompatibility_t;
struct nvmlGpuInstancePlacement_t;
typedef struct nvmlGpuInstancePlacement_t nvmlGpuInstancePlacement_t;
struct nvmlGpuInstanceProfileInfo_t;
typedef struct nvmlGpuInstanceProfileInfo_t nvmlGpuInstanceProfileInfo_t;
struct nvmlGpuInstanceProfileInfo_v2_t;
typedef struct nvmlGpuInstanceProfileInfo_v2_t nvmlGpuInstanceProfileInfo_v2_t;
struct nvmlGpuInstanceProfileInfo_v3_t;
typedef struct nvmlGpuInstanceProfileInfo_v3_t nvmlGpuInstanceProfileInfo_v3_t;
struct nvmlComputeInstancePlacement_t;
typedef struct nvmlComputeInstancePlacement_t nvmlComputeInstancePlacement_t;
struct nvmlComputeInstanceProfileInfo_t;
typedef struct nvmlComputeInstanceProfileInfo_t nvmlComputeInstanceProfileInfo_t;
struct nvmlComputeInstanceProfileInfo_v2_t;
typedef struct nvmlComputeInstanceProfileInfo_v2_t nvmlComputeInstanceProfileInfo_v2_t;
struct nvmlComputeInstanceProfileInfo_v3_t;
typedef struct nvmlComputeInstanceProfileInfo_v3_t nvmlComputeInstanceProfileInfo_v3_t;
struct _anon_pod6;
typedef struct _anon_pod6 _anon_pod6;
struct nvmlGpmSupport_t;
typedef struct nvmlGpmSupport_t nvmlGpmSupport_t;
struct nvmlMask255_t;
typedef struct nvmlMask255_t nvmlMask255_t;
struct nvmlDevicePowerMizerModes_v1_t;
typedef struct nvmlDevicePowerMizerModes_v1_t nvmlDevicePowerMizerModes_v1_t;
struct nvmlHostname_v1_t;
typedef struct nvmlHostname_v1_t nvmlHostname_v1_t;
struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t;
typedef struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t nvmlEccSramUniqueUncorrectedErrorEntry_v1_t;
struct nvmlNvLinkInfo_v1_t;
typedef struct nvmlNvLinkInfo_v1_t nvmlNvLinkInfo_v1_t;
struct nvmlNvlinkFirmwareVersion_t;
typedef struct nvmlNvlinkFirmwareVersion_t nvmlNvlinkFirmwareVersion_t;
union _anon_pod7;
typedef union _anon_pod7 _anon_pod7;
struct nvmlPowerValue_v2_t;
typedef struct nvmlPowerValue_v2_t nvmlPowerValue_v2_t;
struct nvmlVgpuTypeIdInfo_v1_t;
typedef struct nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_v1_t;
struct nvmlVgpuTypeMaxInstance_v1_t;
typedef struct nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_v1_t;
struct nvmlVgpuCreatablePlacementInfo_v1_t;
typedef struct nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_v1_t;
struct nvmlVgpuProcessUtilizationSample_t;
typedef struct nvmlVgpuProcessUtilizationSample_t nvmlVgpuProcessUtilizationSample_t;
struct nvmlVgpuProcessUtilizationInfo_v1_t;
typedef struct nvmlVgpuProcessUtilizationInfo_v1_t nvmlVgpuProcessUtilizationInfo_v1_t;
struct nvmlActiveVgpuInstanceInfo_v1_t;
typedef struct nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_v1_t;
struct nvmlEncoderSessionInfo_t;
typedef struct nvmlEncoderSessionInfo_t nvmlEncoderSessionInfo_t;
struct nvmlFBCSessionInfo_t;
typedef struct nvmlFBCSessionInfo_t nvmlFBCSessionInfo_t;
struct nvmlGpuFabricInfo_t;
typedef struct nvmlGpuFabricInfo_t nvmlGpuFabricInfo_t;
struct nvmlGpuFabricInfo_v2_t;
typedef struct nvmlGpuFabricInfo_v2_t nvmlGpuFabricInfo_v2_t;
struct nvmlGpuFabricInfo_v3_t;
typedef struct nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfo_v3_t;
struct nvmlEventData_t;
typedef struct nvmlEventData_t nvmlEventData_t;
struct nvmlSystemEventSetCreateRequest_v1_t;
typedef struct nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_v1_t;
struct nvmlSystemEventSetFreeRequest_v1_t;
typedef struct nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_v1_t;
struct nvmlSystemRegisterEventRequest_v1_t;
typedef struct nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_v1_t;
struct nvmlExcludedDeviceInfo_t;
typedef struct nvmlExcludedDeviceInfo_t nvmlExcludedDeviceInfo_t;
struct nvmlProcessDetailList_v1_t;
typedef struct nvmlProcessDetailList_v1_t nvmlProcessDetailList_v1_t;
struct nvmlBridgeChipHierarchy_t;
typedef struct nvmlBridgeChipHierarchy_t nvmlBridgeChipHierarchy_t;
struct nvmlSample_t;
typedef struct nvmlSample_t nvmlSample_t;
struct nvmlVgpuInstanceUtilizationSample_t;
typedef struct nvmlVgpuInstanceUtilizationSample_t nvmlVgpuInstanceUtilizationSample_t;
struct nvmlVgpuInstanceUtilizationInfo_v1_t;
typedef struct nvmlVgpuInstanceUtilizationInfo_v1_t nvmlVgpuInstanceUtilizationInfo_v1_t;
struct nvmlFieldValue_t;
typedef struct nvmlFieldValue_t nvmlFieldValue_t;
struct nvmlGpuThermalSettings_t;
typedef struct nvmlGpuThermalSettings_t nvmlGpuThermalSettings_t;
struct nvmlUUID_v1_t;
typedef struct nvmlUUID_v1_t nvmlUUID_v1_t;
struct nvmlClkMonStatus_t;
typedef struct nvmlClkMonStatus_t nvmlClkMonStatus_t;
struct nvmlProcessesUtilizationInfo_v1_t;
typedef struct nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_v1_t;
struct nvmlGpuDynamicPstatesInfo_t;
typedef struct nvmlGpuDynamicPstatesInfo_t nvmlGpuDynamicPstatesInfo_t;
union nvmlVgpuSchedulerParams_t;
typedef union nvmlVgpuSchedulerParams_t nvmlVgpuSchedulerParams_t;
union nvmlVgpuSchedulerSetParams_t;
typedef union nvmlVgpuSchedulerSetParams_t nvmlVgpuSchedulerSetParams_t;
struct nvmlVgpuLicenseInfo_t;
typedef struct nvmlVgpuLicenseInfo_t nvmlVgpuLicenseInfo_t;
struct nvmlGridLicensableFeature_t;
typedef struct nvmlGridLicensableFeature_t nvmlGridLicensableFeature_t;
struct nvmlUnitFanSpeeds_t;
typedef struct nvmlUnitFanSpeeds_t nvmlUnitFanSpeeds_t;
struct nvmlSystemEventSetWaitRequest_v1_t;
typedef struct nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_v1_t;
struct nvmlVgpuPgpuMetadata_t;
typedef struct nvmlVgpuPgpuMetadata_t nvmlVgpuPgpuMetadata_t;
struct nvmlGpuInstanceInfo_t;
typedef struct nvmlGpuInstanceInfo_t nvmlGpuInstanceInfo_t;
struct nvmlComputeInstanceInfo_t;
typedef struct nvmlComputeInstanceInfo_t nvmlComputeInstanceInfo_t;
struct nvmlGpmMetric_t;
typedef struct nvmlGpmMetric_t nvmlGpmMetric_t;
struct nvmlWorkloadPowerProfileInfo_v1_t;
typedef struct nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_v1_t;
struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t;
typedef struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_v1_t;
struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t;
typedef struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_v1_t;
struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t;
typedef struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_v1_t;
struct nvmlNvlinkFirmwareInfo_t;
typedef struct nvmlNvlinkFirmwareInfo_t nvmlNvlinkFirmwareInfo_t;
struct nvmlPRMTLV_v1_t;
typedef struct nvmlPRMTLV_v1_t nvmlPRMTLV_v1_t;
struct nvmlVgpuProcessesUtilizationInfo_v1_t;
typedef struct nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_v1_t;
struct nvmlVgpuInstancesUtilizationInfo_v1_t;
typedef struct nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_v1_t;
struct nvmlVgpuSchedulerLog_t;
typedef struct nvmlVgpuSchedulerLog_t nvmlVgpuSchedulerLog_t;
struct nvmlVgpuSchedulerGetState_t;
typedef struct nvmlVgpuSchedulerGetState_t nvmlVgpuSchedulerGetState_t;
struct nvmlVgpuSchedulerStateInfo_v1_t;
typedef struct nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_v1_t;
struct nvmlVgpuSchedulerLogInfo_v1_t;
typedef struct nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_v1_t;
struct nvmlVgpuSchedulerSetState_t;
typedef struct nvmlVgpuSchedulerSetState_t nvmlVgpuSchedulerSetState_t;
struct nvmlVgpuSchedulerState_v1_t;
typedef struct nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_v1_t;
struct nvmlGridLicensableFeatures_t;
typedef struct nvmlGridLicensableFeatures_t nvmlGridLicensableFeatures_t;
struct nvmlGpmMetricsGet_t;
typedef struct nvmlGpmMetricsGet_t nvmlGpmMetricsGet_t;
struct nvmlNvLinkInfo_v2_t;
typedef struct nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_v2_t;
struct nvmlWorkloadPowerProfileProfilesInfo_v1_t;
typedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_v1_t;

/* "cuda/bindings/cy_nvml.pxd":15
 * 
 * # enums
 * ctypedef enum nvmlBridgeChipType_t "nvmlBridgeChipType_t":             # <<<<<<<<<<<<<<
 *     NVML_BRIDGE_CHIP_PLX "NVML_BRIDGE_CHIP_PLX" = 0
 *     NVML_BRIDGE_CHIP_BRO4 "NVML_BRIDGE_CHIP_BRO4" = 1
*/
/* Bridge chip variants reported by NVML (mirrors nvml.h's nvmlBridgeChipType_t). */
enum nvmlBridgeChipType_t {
    NVML_BRIDGE_CHIP_PLX  = 0,
    NVML_BRIDGE_CHIP_BRO4 = 1
};
typedef enum nvmlBridgeChipType_t nvmlBridgeChipType_t;

/* "cuda/bindings/cy_nvml.pxd":19
 *     NVML_BRIDGE_CHIP_BRO4 "NVML_BRIDGE_CHIP_BRO4" = 1
 * 
 * ctypedef enum nvmlNvLinkUtilizationCountUnits_t "nvmlNvLinkUtilizationCountUnits_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_COUNTER_UNIT_CYCLES "NVML_NVLINK_COUNTER_UNIT_CYCLES" = 0
 *     NVML_NVLINK_COUNTER_UNIT_PACKETS "NVML_NVLINK_COUNTER_UNIT_PACKETS" = 1
*/
/* Unit in which an NvLink utilization counter reports (mirrors nvml.h). */
enum nvmlNvLinkUtilizationCountUnits_t {
    NVML_NVLINK_COUNTER_UNIT_CYCLES   = 0,
    NVML_NVLINK_COUNTER_UNIT_PACKETS  = 1,
    NVML_NVLINK_COUNTER_UNIT_BYTES    = 2,
    NVML_NVLINK_COUNTER_UNIT_RESERVED = 3,
    NVML_NVLINK_COUNTER_UNIT_COUNT    = 4   /* number of unit kinds (implicit in the generated form) */
};
typedef enum nvmlNvLinkUtilizationCountUnits_t nvmlNvLinkUtilizationCountUnits_t;

/* "cuda/bindings/cy_nvml.pxd":26
 *     NVML_NVLINK_COUNTER_UNIT_COUNT "NVML_NVLINK_COUNTER_UNIT_COUNT"
 * 
 * ctypedef enum nvmlNvLinkUtilizationCountPktTypes_t "nvmlNvLinkUtilizationCountPktTypes_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_COUNTER_PKTFILTER_NOP "NVML_NVLINK_COUNTER_PKTFILTER_NOP" = 0x1
 *     NVML_NVLINK_COUNTER_PKTFILTER_READ "NVML_NVLINK_COUNTER_PKTFILTER_READ" = 0x2
*/
/* Bit-flag packet filters for NvLink utilization counting (mirrors nvml.h).
 * Flags may be OR-ed together; ALL is the union of every filter bit. */
enum nvmlNvLinkUtilizationCountPktTypes_t {
    NVML_NVLINK_COUNTER_PKTFILTER_NOP        = 0x01,
    NVML_NVLINK_COUNTER_PKTFILTER_READ       = 0x02,
    NVML_NVLINK_COUNTER_PKTFILTER_WRITE      = 0x04,
    NVML_NVLINK_COUNTER_PKTFILTER_RATOM      = 0x08,
    NVML_NVLINK_COUNTER_PKTFILTER_NRATOM     = 0x10,
    NVML_NVLINK_COUNTER_PKTFILTER_FLUSH      = 0x20,
    NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA   = 0x40,
    NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80,
    NVML_NVLINK_COUNTER_PKTFILTER_ALL        = 0xFF
};
typedef enum nvmlNvLinkUtilizationCountPktTypes_t nvmlNvLinkUtilizationCountPktTypes_t;

/* "cuda/bindings/cy_nvml.pxd":37
 *     NVML_NVLINK_COUNTER_PKTFILTER_ALL "NVML_NVLINK_COUNTER_PKTFILTER_ALL" = 0xFF
 * 
 * ctypedef enum nvmlNvLinkCapability_t "nvmlNvLinkCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_CAP_P2P_SUPPORTED "NVML_NVLINK_CAP_P2P_SUPPORTED" = 0
 *     NVML_NVLINK_CAP_SYSMEM_ACCESS "NVML_NVLINK_CAP_SYSMEM_ACCESS" = 1
*/
/* Queryable NvLink capabilities (mirrors nvml.h's nvmlNvLinkCapability_t). */
enum nvmlNvLinkCapability_t {
    NVML_NVLINK_CAP_P2P_SUPPORTED  = 0,
    NVML_NVLINK_CAP_SYSMEM_ACCESS  = 1,
    NVML_NVLINK_CAP_P2P_ATOMICS    = 2,
    NVML_NVLINK_CAP_SYSMEM_ATOMICS = 3,
    NVML_NVLINK_CAP_SLI_BRIDGE     = 4,
    NVML_NVLINK_CAP_VALID          = 5,
    NVML_NVLINK_CAP_COUNT          = 6   /* number of capabilities (implicit in the generated form) */
};
typedef enum nvmlNvLinkCapability_t nvmlNvLinkCapability_t;

/* "cuda/bindings/cy_nvml.pxd":46
 *     NVML_NVLINK_CAP_COUNT "NVML_NVLINK_CAP_COUNT"
 * 
 * ctypedef enum nvmlNvLinkErrorCounter_t "nvmlNvLinkErrorCounter_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_ERROR_DL_REPLAY "NVML_NVLINK_ERROR_DL_REPLAY" = 0
 *     NVML_NVLINK_ERROR_DL_RECOVERY "NVML_NVLINK_ERROR_DL_RECOVERY" = 1
*/
/* NvLink error-counter selectors (mirrors nvml.h's nvmlNvLinkErrorCounter_t). */
enum nvmlNvLinkErrorCounter_t {
    NVML_NVLINK_ERROR_DL_REPLAY   = 0,
    NVML_NVLINK_ERROR_DL_RECOVERY = 1,
    NVML_NVLINK_ERROR_DL_CRC_FLIT = 2,
    NVML_NVLINK_ERROR_DL_CRC_DATA = 3,
    NVML_NVLINK_ERROR_DL_ECC_DATA = 4,
    NVML_NVLINK_ERROR_COUNT       = 5   /* number of counters (implicit in the generated form) */
};
typedef enum nvmlNvLinkErrorCounter_t nvmlNvLinkErrorCounter_t;

/* "cuda/bindings/cy_nvml.pxd":54
 *     NVML_NVLINK_ERROR_COUNT "NVML_NVLINK_ERROR_COUNT"
 * 
 * ctypedef enum nvmlIntNvLinkDeviceType_t "nvmlIntNvLinkDeviceType_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_DEVICE_TYPE_GPU "NVML_NVLINK_DEVICE_TYPE_GPU" = 0x00
 *     NVML_NVLINK_DEVICE_TYPE_IBMNPU "NVML_NVLINK_DEVICE_TYPE_IBMNPU" = 0x01
*/
/* Remote-endpoint device kinds on an NvLink (mirrors nvml.h). */
enum nvmlIntNvLinkDeviceType_t {
    NVML_NVLINK_DEVICE_TYPE_GPU     = 0x00,
    NVML_NVLINK_DEVICE_TYPE_IBMNPU  = 0x01,
    NVML_NVLINK_DEVICE_TYPE_SWITCH  = 0x02,
    NVML_NVLINK_DEVICE_TYPE_UNKNOWN = 0xFF   /* sentinel for unrecognized endpoints */
};
typedef enum nvmlIntNvLinkDeviceType_t nvmlIntNvLinkDeviceType_t;

/* "cuda/bindings/cy_nvml.pxd":60
 *     NVML_NVLINK_DEVICE_TYPE_UNKNOWN "NVML_NVLINK_DEVICE_TYPE_UNKNOWN" = 0xFF
 * 
 * ctypedef enum nvmlGpuTopologyLevel_t "nvmlGpuTopologyLevel_t":             # <<<<<<<<<<<<<<
 *     NVML_TOPOLOGY_INTERNAL "NVML_TOPOLOGY_INTERNAL" = 0
 *     NVML_TOPOLOGY_SINGLE "NVML_TOPOLOGY_SINGLE" = 10
*/
/* GPU-to-GPU topology distance levels (mirrors nvml.h). Values are spaced in
 * tens so intermediate levels can be inserted without renumbering. */
enum nvmlGpuTopologyLevel_t {
    NVML_TOPOLOGY_INTERNAL   = 0,
    NVML_TOPOLOGY_SINGLE     = 10,
    NVML_TOPOLOGY_MULTIPLE   = 20,
    NVML_TOPOLOGY_HOSTBRIDGE = 30,
    NVML_TOPOLOGY_NODE       = 40,
    NVML_TOPOLOGY_SYSTEM     = 50
};
typedef enum nvmlGpuTopologyLevel_t nvmlGpuTopologyLevel_t;

/* "cuda/bindings/cy_nvml.pxd":68
 *     NVML_TOPOLOGY_SYSTEM "NVML_TOPOLOGY_SYSTEM" = 50
 * 
 * ctypedef enum nvmlGpuP2PStatus_t "nvmlGpuP2PStatus_t":             # <<<<<<<<<<<<<<
 *     NVML_P2P_STATUS_OK "NVML_P2P_STATUS_OK" = 0
 *     NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED "NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED"
*/
/* Peer-to-peer capability status codes (mirrors nvml.h's nvmlGpuP2PStatus_t).
 * NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED is the correctly-spelled alias of the
 * historical misspelling NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED; both are kept
 * for source compatibility and share the value 1. Implicit enumerator values
 * from the generated form are written out explicitly. */
enum nvmlGpuP2PStatus_t {
    NVML_P2P_STATUS_OK                         = 0,
    NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED       = 1,
    NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED      = NVML_P2P_STATUS_CHIPSET_NOT_SUPPORED,
    NVML_P2P_STATUS_GPU_NOT_SUPPORTED          = 2,
    NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED = 3,
    NVML_P2P_STATUS_DISABLED_BY_REGKEY         = 4,
    NVML_P2P_STATUS_NOT_SUPPORTED              = 5,
    NVML_P2P_STATUS_UNKNOWN                    = 6
};
typedef enum nvmlGpuP2PStatus_t nvmlGpuP2PStatus_t;

/* "cuda/bindings/cy_nvml.pxd":78
 *     NVML_P2P_STATUS_UNKNOWN "NVML_P2P_STATUS_UNKNOWN"
 * 
 * ctypedef enum nvmlGpuP2PCapsIndex_t "nvmlGpuP2PCapsIndex_t":             # <<<<<<<<<<<<<<
 *     NVML_P2P_CAPS_INDEX_READ "NVML_P2P_CAPS_INDEX_READ" = 0
 *     NVML_P2P_CAPS_INDEX_WRITE "NVML_P2P_CAPS_INDEX_WRITE" = 1
*/
/* Indices for querying individual P2P capabilities (mirrors nvml.h).
 * NVML_P2P_CAPS_INDEX_PROP is an alias sharing the value of
 * NVML_P2P_CAPS_INDEX_PCI (4). */
enum nvmlGpuP2PCapsIndex_t {
    NVML_P2P_CAPS_INDEX_READ    = 0,
    NVML_P2P_CAPS_INDEX_WRITE   = 1,
    NVML_P2P_CAPS_INDEX_NVLINK  = 2,
    NVML_P2P_CAPS_INDEX_ATOMICS = 3,
    NVML_P2P_CAPS_INDEX_PCI     = 4,
    NVML_P2P_CAPS_INDEX_PROP    = NVML_P2P_CAPS_INDEX_PCI,
    NVML_P2P_CAPS_INDEX_UNKNOWN = 5
};
typedef enum nvmlGpuP2PCapsIndex_t nvmlGpuP2PCapsIndex_t;

/* "cuda/bindings/cy_nvml.pxd":87
 *     NVML_P2P_CAPS_INDEX_UNKNOWN "NVML_P2P_CAPS_INDEX_UNKNOWN" = 5
 * 
 * ctypedef enum nvmlSamplingType_t "nvmlSamplingType_t":             # <<<<<<<<<<<<<<
 *     NVML_TOTAL_POWER_SAMPLES "NVML_TOTAL_POWER_SAMPLES" = 0
 *     NVML_GPU_UTILIZATION_SAMPLES "NVML_GPU_UTILIZATION_SAMPLES" = 1
*/
/* Sample-stream selectors for nvmlDeviceGetSamples-style queries (mirrors nvml.h). */
enum nvmlSamplingType_t {
    NVML_TOTAL_POWER_SAMPLES        = 0,
    NVML_GPU_UTILIZATION_SAMPLES    = 1,
    NVML_MEMORY_UTILIZATION_SAMPLES = 2,
    NVML_ENC_UTILIZATION_SAMPLES    = 3,
    NVML_DEC_UTILIZATION_SAMPLES    = 4,
    NVML_PROCESSOR_CLK_SAMPLES      = 5,
    NVML_MEMORY_CLK_SAMPLES         = 6,
    NVML_MODULE_POWER_SAMPLES       = 7,
    NVML_JPG_UTILIZATION_SAMPLES    = 8,
    NVML_OFA_UTILIZATION_SAMPLES    = 9,
    NVML_SAMPLINGTYPE_COUNT         = 10   /* number of sampling types (implicit in the generated form) */
};
typedef enum nvmlSamplingType_t nvmlSamplingType_t;

/* "cuda/bindings/cy_nvml.pxd":100
 *     NVML_SAMPLINGTYPE_COUNT "NVML_SAMPLINGTYPE_COUNT"
 * 
 * ctypedef enum nvmlPcieUtilCounter_t "nvmlPcieUtilCounter_t":             # <<<<<<<<<<<<<<
 *     NVML_PCIE_UTIL_TX_BYTES "NVML_PCIE_UTIL_TX_BYTES" = 0
 *     NVML_PCIE_UTIL_RX_BYTES "NVML_PCIE_UTIL_RX_BYTES" = 1
*/
/* PCIe throughput counter selectors (mirrors nvml.h's nvmlPcieUtilCounter_t). */
enum nvmlPcieUtilCounter_t {
    NVML_PCIE_UTIL_TX_BYTES = 0,
    NVML_PCIE_UTIL_RX_BYTES = 1,
    NVML_PCIE_UTIL_COUNT    = 2   /* number of counters (implicit in the generated form) */
};
typedef enum nvmlPcieUtilCounter_t nvmlPcieUtilCounter_t;

/* "cuda/bindings/cy_nvml.pxd":105
 *     NVML_PCIE_UTIL_COUNT "NVML_PCIE_UTIL_COUNT"
 * 
 * ctypedef enum nvmlValueType_t "nvmlValueType_t":             # <<<<<<<<<<<<<<
 *     NVML_VALUE_TYPE_DOUBLE "NVML_VALUE_TYPE_DOUBLE" = 0
 *     NVML_VALUE_TYPE_UNSIGNED_INT "NVML_VALUE_TYPE_UNSIGNED_INT" = 1
*/
/* Tag for the active member of the nvmlValue_t union (mirrors nvml.h). */
enum nvmlValueType_t {
    NVML_VALUE_TYPE_DOUBLE             = 0,
    NVML_VALUE_TYPE_UNSIGNED_INT       = 1,
    NVML_VALUE_TYPE_UNSIGNED_LONG      = 2,
    NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3,
    NVML_VALUE_TYPE_SIGNED_LONG_LONG   = 4,
    NVML_VALUE_TYPE_SIGNED_INT         = 5,
    NVML_VALUE_TYPE_UNSIGNED_SHORT     = 6,
    NVML_VALUE_TYPE_COUNT              = 7   /* number of value types (implicit in the generated form) */
};
typedef enum nvmlValueType_t nvmlValueType_t;

/* "cuda/bindings/cy_nvml.pxd":115
 *     NVML_VALUE_TYPE_COUNT "NVML_VALUE_TYPE_COUNT"
 * 
 * ctypedef enum nvmlPerfPolicyType_t "nvmlPerfPolicyType_t":             # <<<<<<<<<<<<<<
 *     NVML_PERF_POLICY_POWER "NVML_PERF_POLICY_POWER" = 0
 *     NVML_PERF_POLICY_THERMAL "NVML_PERF_POLICY_THERMAL" = 1
*/
/* Performance/violation policy selectors (mirrors nvml.h). Note the gap
 * between 5 and 10 present in the original NVML numbering. */
enum nvmlPerfPolicyType_t {
    NVML_PERF_POLICY_POWER             = 0,
    NVML_PERF_POLICY_THERMAL           = 1,
    NVML_PERF_POLICY_SYNC_BOOST        = 2,
    NVML_PERF_POLICY_BOARD_LIMIT       = 3,
    NVML_PERF_POLICY_LOW_UTILIZATION   = 4,
    NVML_PERF_POLICY_RELIABILITY       = 5,
    NVML_PERF_POLICY_TOTAL_APP_CLOCKS  = 10,
    NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11,
    NVML_PERF_POLICY_COUNT             = 12   /* one past the last policy (implicit in the generated form) */
};
typedef enum nvmlPerfPolicyType_t nvmlPerfPolicyType_t;

/* "cuda/bindings/cy_nvml.pxd":126
 *     NVML_PERF_POLICY_COUNT "NVML_PERF_POLICY_COUNT"
 * 
 * ctypedef enum nvmlThermalTarget_t "nvmlThermalTarget_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_TARGET_NONE "NVML_THERMAL_TARGET_NONE" = 0
 *     NVML_THERMAL_TARGET_GPU "NVML_THERMAL_TARGET_GPU" = 1
*/
/* Thermal sensor target locations (mirrors nvml.h's nvmlThermalTarget_t).
 * Values 1/2/4/8 compose bitwise into ALL (15); UNKNOWN is the -1 sentinel. */
enum nvmlThermalTarget_t {
    NVML_THERMAL_TARGET_NONE         = 0,
    NVML_THERMAL_TARGET_GPU          = 1,
    NVML_THERMAL_TARGET_MEMORY       = 2,
    NVML_THERMAL_TARGET_POWER_SUPPLY = 4,
    NVML_THERMAL_TARGET_BOARD        = 8,
    NVML_THERMAL_TARGET_VCD_BOARD    = 9,
    NVML_THERMAL_TARGET_VCD_INLET    = 10,
    NVML_THERMAL_TARGET_VCD_OUTLET   = 11,
    NVML_THERMAL_TARGET_ALL          = 15,
    NVML_THERMAL_TARGET_UNKNOWN      = -1
};
typedef enum nvmlThermalTarget_t nvmlThermalTarget_t;

/* "cuda/bindings/cy_nvml.pxd":138
 *     NVML_THERMAL_TARGET_UNKNOWN "NVML_THERMAL_TARGET_UNKNOWN" = -(1)
 * 
 * ctypedef enum nvmlThermalController_t "nvmlThermalController_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_CONTROLLER_NONE "NVML_THERMAL_CONTROLLER_NONE" = 0
 *     NVML_THERMAL_CONTROLLER_GPU_INTERNAL "NVML_THERMAL_CONTROLLER_GPU_INTERNAL"
*/
/* Thermal controller kinds (mirrors nvml.h's nvmlThermalController_t).
 * Implicit sequential values from the generated form are written explicitly;
 * UNKNOWN is the -1 sentinel. */
enum nvmlThermalController_t {
    NVML_THERMAL_CONTROLLER_NONE            = 0,
    NVML_THERMAL_CONTROLLER_GPU_INTERNAL    = 1,
    NVML_THERMAL_CONTROLLER_ADM1032         = 2,
    NVML_THERMAL_CONTROLLER_ADT7461         = 3,
    NVML_THERMAL_CONTROLLER_MAX6649         = 4,
    NVML_THERMAL_CONTROLLER_MAX1617         = 5,
    NVML_THERMAL_CONTROLLER_LM99            = 6,
    NVML_THERMAL_CONTROLLER_LM89            = 7,
    NVML_THERMAL_CONTROLLER_LM64            = 8,
    NVML_THERMAL_CONTROLLER_G781            = 9,
    NVML_THERMAL_CONTROLLER_ADT7473         = 10,
    NVML_THERMAL_CONTROLLER_SBMAX6649       = 11,
    NVML_THERMAL_CONTROLLER_VBIOSEVT        = 12,
    NVML_THERMAL_CONTROLLER_OS              = 13,
    NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS = 14,
    NVML_THERMAL_CONTROLLER_NVSYSCON_E551   = 15,
    NVML_THERMAL_CONTROLLER_MAX6649R        = 16,
    NVML_THERMAL_CONTROLLER_ADT7473S        = 17,
    NVML_THERMAL_CONTROLLER_UNKNOWN         = -1
};
typedef enum nvmlThermalController_t nvmlThermalController_t;

/* "cuda/bindings/cy_nvml.pxd":159
 *     NVML_THERMAL_CONTROLLER_UNKNOWN "NVML_THERMAL_CONTROLLER_UNKNOWN" = -(1)
 * 
 * ctypedef enum nvmlCoolerControl_t "nvmlCoolerControl_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_COOLER_SIGNAL_NONE "NVML_THERMAL_COOLER_SIGNAL_NONE" = 0
 *     NVML_THERMAL_COOLER_SIGNAL_TOGGLE "NVML_THERMAL_COOLER_SIGNAL_TOGGLE" = 1
*/
/* Cooler control-signal kinds (mirrors nvml.h's nvmlCoolerControl_t). */
enum nvmlCoolerControl_t {
    NVML_THERMAL_COOLER_SIGNAL_NONE     = 0,
    NVML_THERMAL_COOLER_SIGNAL_TOGGLE   = 1,
    NVML_THERMAL_COOLER_SIGNAL_VARIABLE = 2,
    NVML_THERMAL_COOLER_SIGNAL_COUNT    = 3   /* number of signal kinds (implicit in the generated form) */
};
typedef enum nvmlCoolerControl_t nvmlCoolerControl_t;

/* "cuda/bindings/cy_nvml.pxd":165
 *     NVML_THERMAL_COOLER_SIGNAL_COUNT "NVML_THERMAL_COOLER_SIGNAL_COUNT"
 * 
 * ctypedef enum nvmlCoolerTarget_t "nvmlCoolerTarget_t":             # <<<<<<<<<<<<<<
 *     NVML_THERMAL_COOLER_TARGET_NONE "NVML_THERMAL_COOLER_TARGET_NONE" = (1 << 0)
 *     NVML_THERMAL_COOLER_TARGET_GPU "NVML_THERMAL_COOLER_TARGET_GPU" = (1 << 1)
*/
/* Cooler target selectors (NVML nvmlCoolerTarget_t).
 * NOTE: these are bit flags (each value is a distinct 1<<n bit), so targets
 * may be OR-ed together; GPU_RELATED is the pre-built union of GPU, MEMORY
 * and POWER_SUPPLY. */
enum nvmlCoolerTarget_t {

  /* "cuda/bindings/cy_nvml.pxd":170
 *     NVML_THERMAL_COOLER_TARGET_MEMORY "NVML_THERMAL_COOLER_TARGET_MEMORY" = (1 << 2)
 *     NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY "NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY" = (1 << 3)
 *     NVML_THERMAL_COOLER_TARGET_GPU_RELATED "NVML_THERMAL_COOLER_TARGET_GPU_RELATED" = ((NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY) | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)             # <<<<<<<<<<<<<<
 * 
 * ctypedef enum nvmlUUIDType_t "nvmlUUIDType_t":
 */
  NVML_THERMAL_COOLER_TARGET_NONE = (1 << 0),
  NVML_THERMAL_COOLER_TARGET_GPU = (1 << 1),
  NVML_THERMAL_COOLER_TARGET_MEMORY = (1 << 2),
  NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY = (1 << 3),
  NVML_THERMAL_COOLER_TARGET_GPU_RELATED = ((NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY) | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)
};
typedef enum nvmlCoolerTarget_t nvmlCoolerTarget_t;

/* "cuda/bindings/cy_nvml.pxd":172
 *     NVML_THERMAL_COOLER_TARGET_GPU_RELATED "NVML_THERMAL_COOLER_TARGET_GPU_RELATED" = ((NVML_THERMAL_COOLER_TARGET_GPU | NVML_THERMAL_COOLER_TARGET_MEMORY) | NVML_THERMAL_COOLER_TARGET_POWER_SUPPLY)
 * 
 * ctypedef enum nvmlUUIDType_t "nvmlUUIDType_t":             # <<<<<<<<<<<<<<
 *     NVML_UUID_TYPE_NONE "NVML_UUID_TYPE_NONE" = 0
 *     NVML_UUID_TYPE_ASCII "NVML_UUID_TYPE_ASCII" = 1
*/
/* UUID representation selector (NVML nvmlUUIDType_t): none, ASCII text,
 * or raw binary form. */
enum nvmlUUIDType_t {
  NVML_UUID_TYPE_NONE = 0,
  NVML_UUID_TYPE_ASCII = 1,
  NVML_UUID_TYPE_BINARY = 2
};
typedef enum nvmlUUIDType_t nvmlUUIDType_t;

/* "cuda/bindings/cy_nvml.pxd":177
 *     NVML_UUID_TYPE_BINARY "NVML_UUID_TYPE_BINARY" = 2
 * 
 * ctypedef enum nvmlEnableState_t "nvmlEnableState_t":             # <<<<<<<<<<<<<<
 *     NVML_FEATURE_DISABLED "NVML_FEATURE_DISABLED" = 0
 *     NVML_FEATURE_ENABLED "NVML_FEATURE_ENABLED" = 1
*/
/* Generic on/off feature state used throughout the NVML API
 * (NVML nvmlEnableState_t). */
enum nvmlEnableState_t {
  NVML_FEATURE_DISABLED = 0,
  NVML_FEATURE_ENABLED = 1
};
typedef enum nvmlEnableState_t nvmlEnableState_t;

/* "cuda/bindings/cy_nvml.pxd":181
 *     NVML_FEATURE_ENABLED "NVML_FEATURE_ENABLED" = 1
 * 
 * ctypedef enum nvmlBrandType_t "nvmlBrandType_t":             # <<<<<<<<<<<<<<
 *     NVML_BRAND_UNKNOWN "NVML_BRAND_UNKNOWN" = 0
 *     NVML_BRAND_QUADRO "NVML_BRAND_QUADRO" = 1
*/
/* GPU product-brand identifiers (NVML nvmlBrandType_t).
 * NVIDIA_VGAMING is a deliberate alias of NVIDIA_CLOUD_GAMING (both 11).
 * NOTE(review): COUNT is explicitly 18 although the highest distinct
 * enumerator is 16 — this matches the generating .pxd (and thus the NVML
 * header this binding targets); do not "correct" it here. */
enum nvmlBrandType_t {

  /* "cuda/bindings/cy_nvml.pxd":194
 *     NVML_BRAND_NVIDIA_VWS "NVML_BRAND_NVIDIA_VWS" = 10
 *     NVML_BRAND_NVIDIA_CLOUD_GAMING "NVML_BRAND_NVIDIA_CLOUD_GAMING" = 11
 *     NVML_BRAND_NVIDIA_VGAMING "NVML_BRAND_NVIDIA_VGAMING" = NVML_BRAND_NVIDIA_CLOUD_GAMING             # <<<<<<<<<<<<<<
 *     NVML_BRAND_QUADRO_RTX "NVML_BRAND_QUADRO_RTX" = 12
 *     NVML_BRAND_NVIDIA_RTX "NVML_BRAND_NVIDIA_RTX" = 13
 */
  NVML_BRAND_UNKNOWN = 0,
  NVML_BRAND_QUADRO = 1,
  NVML_BRAND_TESLA = 2,
  NVML_BRAND_NVS = 3,
  NVML_BRAND_GRID = 4,
  NVML_BRAND_GEFORCE = 5,
  NVML_BRAND_TITAN = 6,
  NVML_BRAND_NVIDIA_VAPPS = 7,
  NVML_BRAND_NVIDIA_VPC = 8,
  NVML_BRAND_NVIDIA_VCS = 9,
  NVML_BRAND_NVIDIA_VWS = 10,
  NVML_BRAND_NVIDIA_CLOUD_GAMING = 11,
  NVML_BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING,  /* alias, same value as CLOUD_GAMING */
  NVML_BRAND_QUADRO_RTX = 12,
  NVML_BRAND_NVIDIA_RTX = 13,
  NVML_BRAND_NVIDIA = 14,
  NVML_BRAND_GEFORCE_RTX = 15,
  NVML_BRAND_TITAN_RTX = 16,
  NVML_BRAND_COUNT = 18
};
typedef enum nvmlBrandType_t nvmlBrandType_t;

/* "cuda/bindings/cy_nvml.pxd":202
 *     NVML_BRAND_COUNT "NVML_BRAND_COUNT" = 18
 * 
 * ctypedef enum nvmlTemperatureThresholds_t "nvmlTemperatureThresholds_t":             # <<<<<<<<<<<<<<
 *     NVML_TEMPERATURE_THRESHOLD_SHUTDOWN "NVML_TEMPERATURE_THRESHOLD_SHUTDOWN" = 0
 *     NVML_TEMPERATURE_THRESHOLD_SLOWDOWN "NVML_TEMPERATURE_THRESHOLD_SLOWDOWN" = 1
*/
/* Temperature threshold selectors (NVML nvmlTemperatureThresholds_t).
 * COUNT (8) marks the end of the valid range. */
enum nvmlTemperatureThresholds_t {
  NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0,
  NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1,
  NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2,
  NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3,
  NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = 4,
  NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = 5,
  NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6,
  NVML_TEMPERATURE_THRESHOLD_GPS_CURR = 7,
  NVML_TEMPERATURE_THRESHOLD_COUNT
};
typedef enum nvmlTemperatureThresholds_t nvmlTemperatureThresholds_t;

/* "cuda/bindings/cy_nvml.pxd":213
 *     NVML_TEMPERATURE_THRESHOLD_COUNT "NVML_TEMPERATURE_THRESHOLD_COUNT"
 * 
 * ctypedef enum nvmlTemperatureSensors_t "nvmlTemperatureSensors_t":             # <<<<<<<<<<<<<<
 *     NVML_TEMPERATURE_GPU "NVML_TEMPERATURE_GPU" = 0
 *     NVML_TEMPERATURE_COUNT "NVML_TEMPERATURE_COUNT"
*/
/* Temperature sensor selectors (NVML nvmlTemperatureSensors_t); only the
 * on-die GPU sensor is defined, with COUNT as the end marker. */
enum nvmlTemperatureSensors_t {
  NVML_TEMPERATURE_GPU = 0,
  NVML_TEMPERATURE_COUNT
};
typedef enum nvmlTemperatureSensors_t nvmlTemperatureSensors_t;

/* "cuda/bindings/cy_nvml.pxd":217
 *     NVML_TEMPERATURE_COUNT "NVML_TEMPERATURE_COUNT"
 * 
 * ctypedef enum nvmlComputeMode_t "nvmlComputeMode_t":             # <<<<<<<<<<<<<<
 *     NVML_COMPUTEMODE_DEFAULT "NVML_COMPUTEMODE_DEFAULT" = 0
 *     NVML_COMPUTEMODE_EXCLUSIVE_THREAD "NVML_COMPUTEMODE_EXCLUSIVE_THREAD" = 1
*/
/* Device compute-mode settings (NVML nvmlComputeMode_t).
 * EXCLUSIVE_THREAD (1) is retained for value compatibility even though it
 * was deprecated upstream; COUNT is the end marker. */
enum nvmlComputeMode_t {
  NVML_COMPUTEMODE_DEFAULT = 0,
  NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1,
  NVML_COMPUTEMODE_PROHIBITED = 2,
  NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3,
  NVML_COMPUTEMODE_COUNT
};
typedef enum nvmlComputeMode_t nvmlComputeMode_t;

/* "cuda/bindings/cy_nvml.pxd":224
 *     NVML_COMPUTEMODE_COUNT "NVML_COMPUTEMODE_COUNT"
 * 
 * ctypedef enum nvmlMemoryErrorType_t "nvmlMemoryErrorType_t":             # <<<<<<<<<<<<<<
 *     NVML_MEMORY_ERROR_TYPE_CORRECTED "NVML_MEMORY_ERROR_TYPE_CORRECTED" = 0
 *     NVML_MEMORY_ERROR_TYPE_UNCORRECTED "NVML_MEMORY_ERROR_TYPE_UNCORRECTED" = 1
*/
/* ECC memory error classes (NVML nvmlMemoryErrorType_t): corrected
 * (single-bit) vs. uncorrected (double-bit); COUNT is the end marker. */
enum nvmlMemoryErrorType_t {
  NVML_MEMORY_ERROR_TYPE_CORRECTED = 0,
  NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1,
  NVML_MEMORY_ERROR_TYPE_COUNT
};
typedef enum nvmlMemoryErrorType_t nvmlMemoryErrorType_t;

/* "cuda/bindings/cy_nvml.pxd":229
 *     NVML_MEMORY_ERROR_TYPE_COUNT "NVML_MEMORY_ERROR_TYPE_COUNT"
 * 
 * ctypedef enum nvmlNvlinkVersion_t "nvmlNvlinkVersion_t":             # <<<<<<<<<<<<<<
 *     NVML_NVLINK_VERSION_INVALID "NVML_NVLINK_VERSION_INVALID" = 0
 *     NVML_NVLINK_VERSION_1_0 "NVML_NVLINK_VERSION_1_0" = 1
*/
/* NVLink protocol generations (NVML nvmlNvlinkVersion_t).
 * Values are ordinal codes, NOT the version numbers themselves
 * (e.g. 2.2 -> 3, 5.0 -> 7). */
enum nvmlNvlinkVersion_t {
  NVML_NVLINK_VERSION_INVALID = 0,
  NVML_NVLINK_VERSION_1_0 = 1,
  NVML_NVLINK_VERSION_2_0 = 2,
  NVML_NVLINK_VERSION_2_2 = 3,
  NVML_NVLINK_VERSION_3_0 = 4,
  NVML_NVLINK_VERSION_3_1 = 5,
  NVML_NVLINK_VERSION_4_0 = 6,
  NVML_NVLINK_VERSION_5_0 = 7
};
typedef enum nvmlNvlinkVersion_t nvmlNvlinkVersion_t;

/* "cuda/bindings/cy_nvml.pxd":239
 *     NVML_NVLINK_VERSION_5_0 "NVML_NVLINK_VERSION_5_0" = 7
 * 
 * ctypedef enum nvmlEccCounterType_t "nvmlEccCounterType_t":             # <<<<<<<<<<<<<<
 *     NVML_VOLATILE_ECC "NVML_VOLATILE_ECC" = 0
 *     NVML_AGGREGATE_ECC "NVML_AGGREGATE_ECC" = 1
*/
/* ECC counter scopes (NVML nvmlEccCounterType_t): volatile (since last
 * reset/reload) vs. aggregate (lifetime); COUNT is the end marker. */
enum nvmlEccCounterType_t {
  NVML_VOLATILE_ECC = 0,
  NVML_AGGREGATE_ECC = 1,
  NVML_ECC_COUNTER_TYPE_COUNT
};
typedef enum nvmlEccCounterType_t nvmlEccCounterType_t;

/* "cuda/bindings/cy_nvml.pxd":244
 *     NVML_ECC_COUNTER_TYPE_COUNT "NVML_ECC_COUNTER_TYPE_COUNT"
 * 
 * ctypedef enum nvmlClockType_t "nvmlClockType_t":             # <<<<<<<<<<<<<<
 *     NVML_CLOCK_GRAPHICS "NVML_CLOCK_GRAPHICS" = 0
 *     NVML_CLOCK_SM "NVML_CLOCK_SM" = 1
*/
/* Clock domain selectors (NVML nvmlClockType_t): graphics, SM, memory and
 * video clocks; COUNT is the end marker. */
enum nvmlClockType_t {
  NVML_CLOCK_GRAPHICS = 0,
  NVML_CLOCK_SM = 1,
  NVML_CLOCK_MEM = 2,
  NVML_CLOCK_VIDEO = 3,
  NVML_CLOCK_COUNT
};
typedef enum nvmlClockType_t nvmlClockType_t;

/* "cuda/bindings/cy_nvml.pxd":251
 *     NVML_CLOCK_COUNT "NVML_CLOCK_COUNT"
 * 
 * ctypedef enum nvmlClockId_t "nvmlClockId_t":             # <<<<<<<<<<<<<<
 *     NVML_CLOCK_ID_CURRENT "NVML_CLOCK_ID_CURRENT" = 0
 *     NVML_CLOCK_ID_APP_CLOCK_TARGET "NVML_CLOCK_ID_APP_CLOCK_TARGET" = 1
*/
/* Which clock value to query within a domain (NVML nvmlClockId_t):
 * current, application target, application default, or customer boost max;
 * COUNT is the end marker. */
enum nvmlClockId_t {
  NVML_CLOCK_ID_CURRENT = 0,
  NVML_CLOCK_ID_APP_CLOCK_TARGET = 1,
  NVML_CLOCK_ID_APP_CLOCK_DEFAULT = 2,
  NVML_CLOCK_ID_CUSTOMER_BOOST_MAX = 3,
  NVML_CLOCK_ID_COUNT
};
typedef enum nvmlClockId_t nvmlClockId_t;

/* "cuda/bindings/cy_nvml.pxd":258
 *     NVML_CLOCK_ID_COUNT "NVML_CLOCK_ID_COUNT"
 * 
 * ctypedef enum nvmlDriverModel_t "nvmlDriverModel_t":             # <<<<<<<<<<<<<<
 *     NVML_DRIVER_WDDM "NVML_DRIVER_WDDM" = 0
 *     NVML_DRIVER_WDM "NVML_DRIVER_WDM" = 1
*/
/* Windows driver models (NVML nvmlDriverModel_t): WDDM, WDM (TCC),
 * and MCDM. */
enum nvmlDriverModel_t {
  NVML_DRIVER_WDDM = 0,
  NVML_DRIVER_WDM = 1,
  NVML_DRIVER_MCDM = 2
};
typedef enum nvmlDriverModel_t nvmlDriverModel_t;

/* "cuda/bindings/cy_nvml.pxd":263
 *     NVML_DRIVER_MCDM "NVML_DRIVER_MCDM" = 2
 * 
 * ctypedef enum nvmlPstates_t "nvmlPstates_t":             # <<<<<<<<<<<<<<
 *     NVML_PSTATE_0 "NVML_PSTATE_0" = 0
 *     NVML_PSTATE_1 "NVML_PSTATE_1" = 1
*/
/* GPU performance states (NVML nvmlPstates_t). P0 is maximum performance,
 * P15 is minimum; UNKNOWN (32) is the out-of-band sentinel. */
enum nvmlPstates_t {
  NVML_PSTATE_0 = 0,
  NVML_PSTATE_1 = 1,
  NVML_PSTATE_2 = 2,
  NVML_PSTATE_3 = 3,
  NVML_PSTATE_4 = 4,
  NVML_PSTATE_5 = 5,
  NVML_PSTATE_6 = 6,
  NVML_PSTATE_7 = 7,
  NVML_PSTATE_8 = 8,
  NVML_PSTATE_9 = 9,
  NVML_PSTATE_10 = 10,
  NVML_PSTATE_11 = 11,
  NVML_PSTATE_12 = 12,
  NVML_PSTATE_13 = 13,
  NVML_PSTATE_14 = 14,
  NVML_PSTATE_15 = 15,
  NVML_PSTATE_UNKNOWN = 32
};
typedef enum nvmlPstates_t nvmlPstates_t;

/* "cuda/bindings/cy_nvml.pxd":282
 *     NVML_PSTATE_UNKNOWN "NVML_PSTATE_UNKNOWN" = 32
 * 
 * ctypedef enum nvmlGpuOperationMode_t "nvmlGpuOperationMode_t":             # <<<<<<<<<<<<<<
 *     NVML_GOM_ALL_ON "NVML_GOM_ALL_ON" = 0
 *     NVML_GOM_COMPUTE "NVML_GOM_COMPUTE" = 1
*/
/* GPU operation modes (NVML nvmlGpuOperationMode_t): everything on,
 * compute-only, or low double-precision. */
enum nvmlGpuOperationMode_t {
  NVML_GOM_ALL_ON = 0,
  NVML_GOM_COMPUTE = 1,
  NVML_GOM_LOW_DP = 2
};
typedef enum nvmlGpuOperationMode_t nvmlGpuOperationMode_t;

/* "cuda/bindings/cy_nvml.pxd":287
 *     NVML_GOM_LOW_DP "NVML_GOM_LOW_DP" = 2
 * 
 * ctypedef enum nvmlInforomObject_t "nvmlInforomObject_t":             # <<<<<<<<<<<<<<
 *     NVML_INFOROM_OEM "NVML_INFOROM_OEM" = 0
 *     NVML_INFOROM_ECC "NVML_INFOROM_ECC" = 1
*/
/* InfoROM object selectors (NVML nvmlInforomObject_t); COUNT is the
 * end marker. */
enum nvmlInforomObject_t {
  NVML_INFOROM_OEM = 0,
  NVML_INFOROM_ECC = 1,
  NVML_INFOROM_POWER = 2,
  NVML_INFOROM_DEN = 3,
  NVML_INFOROM_COUNT
};
typedef enum nvmlInforomObject_t nvmlInforomObject_t;

/* "cuda/bindings/cy_nvml.pxd":294
 *     NVML_INFOROM_COUNT "NVML_INFOROM_COUNT"
 * 
 * ctypedef enum nvmlReturn_t "nvmlReturn_t":             # <<<<<<<<<<<<<<
 *     NVML_SUCCESS "NVML_SUCCESS" = 0
 *     NVML_ERROR_UNINITIALIZED "NVML_ERROR_UNINITIALIZED" = 1
*/
/* NVML API return codes (NVML nvmlReturn_t). 0 is success; 1..30 are the
 * documented error codes; UNKNOWN is 0x3E7 (999).
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR (-42) is a binding-internal sentinel
 * added by this Cython wrapper (leading underscore, not part of nvml.h);
 * its negative value forces a signed underlying type. */
enum nvmlReturn_t {
  NVML_SUCCESS = 0,
  NVML_ERROR_UNINITIALIZED = 1,
  NVML_ERROR_INVALID_ARGUMENT = 2,
  NVML_ERROR_NOT_SUPPORTED = 3,
  NVML_ERROR_NO_PERMISSION = 4,
  NVML_ERROR_ALREADY_INITIALIZED = 5,
  NVML_ERROR_NOT_FOUND = 6,
  NVML_ERROR_INSUFFICIENT_SIZE = 7,
  NVML_ERROR_INSUFFICIENT_POWER = 8,
  NVML_ERROR_DRIVER_NOT_LOADED = 9,
  NVML_ERROR_TIMEOUT = 10,
  NVML_ERROR_IRQ_ISSUE = 11,
  NVML_ERROR_LIBRARY_NOT_FOUND = 12,
  NVML_ERROR_FUNCTION_NOT_FOUND = 13,
  NVML_ERROR_CORRUPTED_INFOROM = 14,
  NVML_ERROR_GPU_IS_LOST = 15,
  NVML_ERROR_RESET_REQUIRED = 16,
  NVML_ERROR_OPERATING_SYSTEM = 17,
  NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18,
  NVML_ERROR_IN_USE = 19,
  NVML_ERROR_MEMORY = 20,
  NVML_ERROR_NO_DATA = 21,
  NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22,
  NVML_ERROR_INSUFFICIENT_RESOURCES = 23,
  NVML_ERROR_FREQ_NOT_SUPPORTED = 24,
  NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25,
  NVML_ERROR_DEPRECATED = 26,
  NVML_ERROR_NOT_READY = 27,
  NVML_ERROR_GPU_NOT_FOUND = 28,
  NVML_ERROR_INVALID_STATE = 29,
  NVML_ERROR_RESET_TYPE_NOT_SUPPORTED = 30,
  NVML_ERROR_UNKNOWN = 0x3E7,
  _NVMLRETURN_T_INTERNAL_LOADING_ERROR = -42L
};
typedef enum nvmlReturn_t nvmlReturn_t;

/* "cuda/bindings/cy_nvml.pxd":329
 *     _NVMLRETURN_T_INTERNAL_LOADING_ERROR "_NVMLRETURN_T_INTERNAL_LOADING_ERROR" = -42
 * 
 * ctypedef enum nvmlMemoryLocation_t "nvmlMemoryLocation_t":             # <<<<<<<<<<<<<<
 *     NVML_MEMORY_LOCATION_L1_CACHE "NVML_MEMORY_LOCATION_L1_CACHE" = 0
 *     NVML_MEMORY_LOCATION_L2_CACHE "NVML_MEMORY_LOCATION_L2_CACHE" = 1
*/
/* On-device memory locations for ECC accounting (NVML nvmlMemoryLocation_t).
 * DRAM and DEVICE_MEMORY intentionally share value 2 (alias pair);
 * COUNT is the end marker. */
enum nvmlMemoryLocation_t {
  NVML_MEMORY_LOCATION_L1_CACHE = 0,
  NVML_MEMORY_LOCATION_L2_CACHE = 1,
  NVML_MEMORY_LOCATION_DRAM = 2,
  NVML_MEMORY_LOCATION_DEVICE_MEMORY = 2,  /* alias of DRAM */
  NVML_MEMORY_LOCATION_REGISTER_FILE = 3,
  NVML_MEMORY_LOCATION_TEXTURE_MEMORY = 4,
  NVML_MEMORY_LOCATION_TEXTURE_SHM = 5,
  NVML_MEMORY_LOCATION_CBU = 6,
  NVML_MEMORY_LOCATION_SRAM = 7,
  NVML_MEMORY_LOCATION_COUNT
};
typedef enum nvmlMemoryLocation_t nvmlMemoryLocation_t;

/* "cuda/bindings/cy_nvml.pxd":341
 *     NVML_MEMORY_LOCATION_COUNT "NVML_MEMORY_LOCATION_COUNT"
 * 
 * ctypedef enum nvmlPageRetirementCause_t "nvmlPageRetirementCause_t":             # <<<<<<<<<<<<<<
 *     NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS "NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS" = 0
 *     NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR "NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR" = 1
*/
/* Reasons a memory page was retired (NVML nvmlPageRetirementCause_t);
 * COUNT is the end marker. */
enum nvmlPageRetirementCause_t {
  NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS = 0,
  NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR = 1,
  NVML_PAGE_RETIREMENT_CAUSE_COUNT
};
typedef enum nvmlPageRetirementCause_t nvmlPageRetirementCause_t;

/* "cuda/bindings/cy_nvml.pxd":346
 *     NVML_PAGE_RETIREMENT_CAUSE_COUNT "NVML_PAGE_RETIREMENT_CAUSE_COUNT"
 * 
 * ctypedef enum nvmlRestrictedAPI_t "nvmlRestrictedAPI_t":             # <<<<<<<<<<<<<<
 *     NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS "NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS" = 0
 *     NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS "NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS" = 1
*/
/* APIs whose access can be restricted to root (NVML nvmlRestrictedAPI_t);
 * COUNT is the end marker. */
enum nvmlRestrictedAPI_t {
  NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS = 0,
  NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS = 1,
  NVML_RESTRICTED_API_COUNT
};
typedef enum nvmlRestrictedAPI_t nvmlRestrictedAPI_t;

/* "cuda/bindings/cy_nvml.pxd":351
 *     NVML_RESTRICTED_API_COUNT "NVML_RESTRICTED_API_COUNT"
 * 
 * ctypedef enum nvmlGpuUtilizationDomainId_t "nvmlGpuUtilizationDomainId_t":             # <<<<<<<<<<<<<<
 *     NVML_GPU_UTILIZATION_DOMAIN_GPU "NVML_GPU_UTILIZATION_DOMAIN_GPU" = 0
 *     NVML_GPU_UTILIZATION_DOMAIN_FB "NVML_GPU_UTILIZATION_DOMAIN_FB" = 1
*/
/* Utilization sampling domains (NVML nvmlGpuUtilizationDomainId_t):
 * GPU core, frame buffer, video engine, and bus interface. */
enum nvmlGpuUtilizationDomainId_t {
  NVML_GPU_UTILIZATION_DOMAIN_GPU = 0,
  NVML_GPU_UTILIZATION_DOMAIN_FB = 1,
  NVML_GPU_UTILIZATION_DOMAIN_VID = 2,
  NVML_GPU_UTILIZATION_DOMAIN_BUS = 3
};
typedef enum nvmlGpuUtilizationDomainId_t nvmlGpuUtilizationDomainId_t;

/* "cuda/bindings/cy_nvml.pxd":357
 *     NVML_GPU_UTILIZATION_DOMAIN_BUS "NVML_GPU_UTILIZATION_DOMAIN_BUS" = 3
 * 
 * ctypedef enum nvmlGpuVirtualizationMode_t "nvmlGpuVirtualizationMode_t":             # <<<<<<<<<<<<<<
 *     NVML_GPU_VIRTUALIZATION_MODE_NONE "NVML_GPU_VIRTUALIZATION_MODE_NONE" = 0
 *     NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH "NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH" = 1
*/
/* GPU virtualization modes (NVML nvmlGpuVirtualizationMode_t): bare metal,
 * passthrough, vGPU guest, vGPU host, or vSGA host. */
enum nvmlGpuVirtualizationMode_t {
  NVML_GPU_VIRTUALIZATION_MODE_NONE = 0,
  NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1,
  NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2,
  NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3,
  NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4
};
typedef enum nvmlGpuVirtualizationMode_t nvmlGpuVirtualizationMode_t;

/* "cuda/bindings/cy_nvml.pxd":364
 *     NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA "NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA" = 4
 * 
 * ctypedef enum nvmlHostVgpuMode_t "nvmlHostVgpuMode_t":             # <<<<<<<<<<<<<<
 *     NVML_HOST_VGPU_MODE_NON_SRIOV "NVML_HOST_VGPU_MODE_NON_SRIOV" = 0
 *     NVML_HOST_VGPU_MODE_SRIOV "NVML_HOST_VGPU_MODE_SRIOV" = 1
*/
/* Host vGPU operating modes (NVML nvmlHostVgpuMode_t): non-SR-IOV
 * vs. SR-IOV. */
enum nvmlHostVgpuMode_t {
  NVML_HOST_VGPU_MODE_NON_SRIOV = 0,
  NVML_HOST_VGPU_MODE_SRIOV = 1
};
typedef enum nvmlHostVgpuMode_t nvmlHostVgpuMode_t;

/* "cuda/bindings/cy_nvml.pxd":368
 *     NVML_HOST_VGPU_MODE_SRIOV "NVML_HOST_VGPU_MODE_SRIOV" = 1
 * 
 * ctypedef enum nvmlVgpuVmIdType_t "nvmlVgpuVmIdType_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_VM_ID_DOMAIN_ID "NVML_VGPU_VM_ID_DOMAIN_ID" = 0
 *     NVML_VGPU_VM_ID_UUID "NVML_VGPU_VM_ID_UUID" = 1
*/
/* How a vGPU virtual machine is identified (NVML nvmlVgpuVmIdType_t):
 * by numeric domain ID or by UUID. */
enum nvmlVgpuVmIdType_t {
  NVML_VGPU_VM_ID_DOMAIN_ID = 0,
  NVML_VGPU_VM_ID_UUID = 1
};
typedef enum nvmlVgpuVmIdType_t nvmlVgpuVmIdType_t;

/* "cuda/bindings/cy_nvml.pxd":372
 *     NVML_VGPU_VM_ID_UUID "NVML_VGPU_VM_ID_UUID" = 1
 * 
 * ctypedef enum nvmlVgpuGuestInfoState_t "nvmlVgpuGuestInfoState_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED "NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED" = 0
 *     NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED "NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED" = 1
*/
/* Whether guest-side vGPU info has been initialized
 * (NVML nvmlVgpuGuestInfoState_t). */
enum nvmlVgpuGuestInfoState_t {
  NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0,
  NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1
};
typedef enum nvmlVgpuGuestInfoState_t nvmlVgpuGuestInfoState_t;

/* "cuda/bindings/cy_nvml.pxd":376
 *     NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED "NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED" = 1
 * 
 * ctypedef enum nvmlGridLicenseFeatureCode_t "nvmlGridLicenseFeatureCode_t":             # <<<<<<<<<<<<<<
 *     NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN "NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN" = 0
 *     NVML_GRID_LICENSE_FEATURE_CODE_VGPU "NVML_GRID_LICENSE_FEATURE_CODE_VGPU" = 1
*/
/* GRID licensable feature codes (NVML nvmlGridLicenseFeatureCode_t).
 * VWORKSTATION is a deliberate alias of NVIDIA_RTX (both 2). */
enum nvmlGridLicenseFeatureCode_t {

  /* "cuda/bindings/cy_nvml.pxd":380
 *     NVML_GRID_LICENSE_FEATURE_CODE_VGPU "NVML_GRID_LICENSE_FEATURE_CODE_VGPU" = 1
 *     NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX "NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX" = 2
 *     NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION "NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION" = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX             # <<<<<<<<<<<<<<
 *     NVML_GRID_LICENSE_FEATURE_CODE_GAMING "NVML_GRID_LICENSE_FEATURE_CODE_GAMING" = 3
 *     NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE "NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE" = 4
 */
  NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0,
  NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1,
  NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2,
  NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX,  /* alias, same value as NVIDIA_RTX */
  NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3,
  NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4
};
typedef enum nvmlGridLicenseFeatureCode_t nvmlGridLicenseFeatureCode_t;

/* "cuda/bindings/cy_nvml.pxd":384
 *     NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE "NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE" = 4
 * 
 * ctypedef enum nvmlVgpuCapability_t "nvmlVgpuCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_CAP_NVLINK_P2P "NVML_VGPU_CAP_NVLINK_P2P" = 0
 *     NVML_VGPU_CAP_GPUDIRECT "NVML_VGPU_CAP_GPUDIRECT" = 1
*/
/* Per-vGPU-type capability queries (NVML nvmlVgpuCapability_t);
 * COUNT is the end marker. */
enum nvmlVgpuCapability_t {
  NVML_VGPU_CAP_NVLINK_P2P = 0,
  NVML_VGPU_CAP_GPUDIRECT = 1,
  NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2,
  NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3,
  NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4,
  NVML_VGPU_CAP_COUNT
};
typedef enum nvmlVgpuCapability_t nvmlVgpuCapability_t;

/* "cuda/bindings/cy_nvml.pxd":392
 *     NVML_VGPU_CAP_COUNT "NVML_VGPU_CAP_COUNT"
 * 
 * ctypedef enum nvmlVgpuDriverCapability_t "nvmlVgpuDriverCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU "NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU" = 0
 *     NVML_VGPU_DRIVER_CAP_WARM_UPDATE "NVML_VGPU_DRIVER_CAP_WARM_UPDATE" = 1
*/
/* Driver-level vGPU capability queries (NVML nvmlVgpuDriverCapability_t);
 * COUNT is the end marker. */
enum nvmlVgpuDriverCapability_t {
  NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0,
  NVML_VGPU_DRIVER_CAP_WARM_UPDATE = 1,
  NVML_VGPU_DRIVER_CAP_COUNT
};
typedef enum nvmlVgpuDriverCapability_t nvmlVgpuDriverCapability_t;

/* "cuda/bindings/cy_nvml.pxd":397
 *     NVML_VGPU_DRIVER_CAP_COUNT "NVML_VGPU_DRIVER_CAP_COUNT"
 * 
 * ctypedef enum nvmlDeviceVgpuCapability_t "nvmlDeviceVgpuCapability_t":             # <<<<<<<<<<<<<<
 *     NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU "NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU" = 0
 *     NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES "NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES" = 1
*/
/* Per-device vGPU capability queries (NVML nvmlDeviceVgpuCapability_t);
 * COUNT is the end marker. */
enum nvmlDeviceVgpuCapability_t {
  NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0,
  NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1,
  NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2,
  NVML_DEVICE_VGPU_CAP_READ_DEVICE_BUFFER_BW = 3,
  NVML_DEVICE_VGPU_CAP_WRITE_DEVICE_BUFFER_BW = 4,
  NVML_DEVICE_VGPU_CAP_DEVICE_STREAMING = 5,
  NVML_DEVICE_VGPU_CAP_MINI_QUARTER_GPU = 6,
  NVML_DEVICE_VGPU_CAP_COMPUTE_MEDIA_ENGINE_GPU = 7,
  NVML_DEVICE_VGPU_CAP_WARM_UPDATE = 8,
  NVML_DEVICE_VGPU_CAP_HOMOGENEOUS_PLACEMENTS = 9,
  NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_SUPPORTED = 10,
  NVML_DEVICE_VGPU_CAP_MIG_TIMESLICING_ENABLED = 11,
  NVML_DEVICE_VGPU_CAP_COUNT
};
typedef enum nvmlDeviceVgpuCapability_t nvmlDeviceVgpuCapability_t;

/* "cuda/bindings/cy_nvml.pxd":412
 *     NVML_DEVICE_VGPU_CAP_COUNT "NVML_DEVICE_VGPU_CAP_COUNT"
 * 
 * ctypedef enum nvmlDeviceGpuRecoveryAction_t "nvmlDeviceGpuRecoveryAction_t":             # <<<<<<<<<<<<<<
 *     NVML_GPU_RECOVERY_ACTION_NONE "NVML_GPU_RECOVERY_ACTION_NONE" = 0
 *     NVML_GPU_RECOVERY_ACTION_GPU_RESET "NVML_GPU_RECOVERY_ACTION_GPU_RESET" = 1
*/
/* Recommended GPU recovery actions (NVML nvmlDeviceGpuRecoveryAction_t),
 * from none up to drain-and-reset. */
enum nvmlDeviceGpuRecoveryAction_t {
  NVML_GPU_RECOVERY_ACTION_NONE = 0,
  NVML_GPU_RECOVERY_ACTION_GPU_RESET = 1,
  NVML_GPU_RECOVERY_ACTION_NODE_REBOOT = 2,
  NVML_GPU_RECOVERY_ACTION_DRAIN_P2P = 3,
  NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET = 4
};
typedef enum nvmlDeviceGpuRecoveryAction_t nvmlDeviceGpuRecoveryAction_t;

/* "cuda/bindings/cy_nvml.pxd":419
 *     NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET "NVML_GPU_RECOVERY_ACTION_DRAIN_AND_RESET" = 4
 * 
 * ctypedef enum nvmlFanState_t "nvmlFanState_t":             # <<<<<<<<<<<<<<
 *     NVML_FAN_NORMAL "NVML_FAN_NORMAL" = 0
 *     NVML_FAN_FAILED "NVML_FAN_FAILED" = 1
*/
/* Fan health states (NVML nvmlFanState_t). */
enum nvmlFanState_t {
  NVML_FAN_NORMAL = 0,
  NVML_FAN_FAILED = 1
};
typedef enum nvmlFanState_t nvmlFanState_t;

/* "cuda/bindings/cy_nvml.pxd":423
 *     NVML_FAN_FAILED "NVML_FAN_FAILED" = 1
 * 
 * ctypedef enum nvmlLedColor_t "nvmlLedColor_t":             # <<<<<<<<<<<<<<
 *     NVML_LED_COLOR_GREEN "NVML_LED_COLOR_GREEN" = 0
 *     NVML_LED_COLOR_AMBER "NVML_LED_COLOR_AMBER" = 1
*/
/* LED indicator colors (NVML nvmlLedColor_t). */
enum nvmlLedColor_t {
  NVML_LED_COLOR_GREEN = 0,
  NVML_LED_COLOR_AMBER = 1
};
typedef enum nvmlLedColor_t nvmlLedColor_t;

/* "cuda/bindings/cy_nvml.pxd":427
 *     NVML_LED_COLOR_AMBER "NVML_LED_COLOR_AMBER" = 1
 * 
 * ctypedef enum nvmlEncoderType_t "nvmlEncoderType_t":             # <<<<<<<<<<<<<<
 *     NVML_ENCODER_QUERY_H264 "NVML_ENCODER_QUERY_H264" = 0x00
 *     NVML_ENCODER_QUERY_HEVC "NVML_ENCODER_QUERY_HEVC" = 0x01
*/
/* Video encoder codec selectors (NVML nvmlEncoderType_t): H.264, HEVC,
 * AV1; UNKNOWN is the 0xFF sentinel. */
enum nvmlEncoderType_t {
  NVML_ENCODER_QUERY_H264 = 0x00,
  NVML_ENCODER_QUERY_HEVC = 0x01,
  NVML_ENCODER_QUERY_AV1 = 0x02,
  NVML_ENCODER_QUERY_UNKNOWN = 0xFF
};
typedef enum nvmlEncoderType_t nvmlEncoderType_t;

/* "cuda/bindings/cy_nvml.pxd":433
 *     NVML_ENCODER_QUERY_UNKNOWN "NVML_ENCODER_QUERY_UNKNOWN" = 0xFF
 * 
 * ctypedef enum nvmlFBCSessionType_t "nvmlFBCSessionType_t":             # <<<<<<<<<<<<<<
 *     NVML_FBC_SESSION_TYPE_UNKNOWN "NVML_FBC_SESSION_TYPE_UNKNOWN" = 0
 *     NVML_FBC_SESSION_TYPE_TOSYS "NVML_FBC_SESSION_TYPE_TOSYS"
*/
/* Frame-buffer-capture session types (NVML nvmlFBCSessionType_t);
 * values after UNKNOWN are sequential (1..4). */
enum nvmlFBCSessionType_t {
  NVML_FBC_SESSION_TYPE_UNKNOWN = 0,
  NVML_FBC_SESSION_TYPE_TOSYS,
  NVML_FBC_SESSION_TYPE_CUDA,
  NVML_FBC_SESSION_TYPE_VID,
  NVML_FBC_SESSION_TYPE_HWENC
};
typedef enum nvmlFBCSessionType_t nvmlFBCSessionType_t;

/* "cuda/bindings/cy_nvml.pxd":440
 *     NVML_FBC_SESSION_TYPE_HWENC "NVML_FBC_SESSION_TYPE_HWENC"
 * 
 * ctypedef enum nvmlDetachGpuState_t "nvmlDetachGpuState_t":             # <<<<<<<<<<<<<<
 *     NVML_DETACH_GPU_KEEP "NVML_DETACH_GPU_KEEP" = 0
 *     NVML_DETACH_GPU_REMOVE "NVML_DETACH_GPU_REMOVE"
*/
/* Whether to keep or remove a GPU on detach (NVML nvmlDetachGpuState_t). */
enum nvmlDetachGpuState_t {
  NVML_DETACH_GPU_KEEP = 0,
  NVML_DETACH_GPU_REMOVE
};
typedef enum nvmlDetachGpuState_t nvmlDetachGpuState_t;

/* "cuda/bindings/cy_nvml.pxd":444
 *     NVML_DETACH_GPU_REMOVE "NVML_DETACH_GPU_REMOVE"
 * 
 * ctypedef enum nvmlPcieLinkState_t "nvmlPcieLinkState_t":             # <<<<<<<<<<<<<<
 *     NVML_PCIE_LINK_KEEP "NVML_PCIE_LINK_KEEP" = 0
 *     NVML_PCIE_LINK_SHUT_DOWN "NVML_PCIE_LINK_SHUT_DOWN"
*/
/* Whether to keep or shut down the PCIe link (NVML nvmlPcieLinkState_t). */
enum nvmlPcieLinkState_t {
  NVML_PCIE_LINK_KEEP = 0,
  NVML_PCIE_LINK_SHUT_DOWN
};
typedef enum nvmlPcieLinkState_t nvmlPcieLinkState_t;

/* "cuda/bindings/cy_nvml.pxd":448
 *     NVML_PCIE_LINK_SHUT_DOWN "NVML_PCIE_LINK_SHUT_DOWN"
 * 
 * ctypedef enum nvmlClockLimitId_t "nvmlClockLimitId_t":             # <<<<<<<<<<<<<<
 *     NVML_CLOCK_LIMIT_ID_RANGE_START "NVML_CLOCK_LIMIT_ID_RANGE_START" = 0xffffff00
 *     NVML_CLOCK_LIMIT_ID_TDP "NVML_CLOCK_LIMIT_ID_TDP"
*/
/* Special clock-limit sentinel IDs (NVML nvmlClockLimitId_t). These live
 * in a reserved range starting at 0xffffff00 so they cannot collide with
 * real clock frequencies; TDP (0xffffff01) and UNLIMITED (0xffffff02)
 * follow sequentially. */
enum nvmlClockLimitId_t {
  NVML_CLOCK_LIMIT_ID_RANGE_START = 0xffffff00,
  NVML_CLOCK_LIMIT_ID_TDP,
  NVML_CLOCK_LIMIT_ID_UNLIMITED
};
typedef enum nvmlClockLimitId_t nvmlClockLimitId_t;

/* "cuda/bindings/cy_nvml.pxd":453
 *     NVML_CLOCK_LIMIT_ID_UNLIMITED "NVML_CLOCK_LIMIT_ID_UNLIMITED"
 * 
 * ctypedef enum nvmlVgpuVmCompatibility_t "nvmlVgpuVmCompatibility_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_VM_COMPATIBILITY_NONE "NVML_VGPU_VM_COMPATIBILITY_NONE" = 0x0
 *     NVML_VGPU_VM_COMPATIBILITY_COLD "NVML_VGPU_VM_COMPATIBILITY_COLD" = 0x1
*/
/* vGPU VM compatibility flags (NVML nvmlVgpuVmCompatibility_t).
 * NOTE: bit flags (0x1/0x2/0x4/0x8) that may be OR-ed together. */
enum nvmlVgpuVmCompatibility_t {
  NVML_VGPU_VM_COMPATIBILITY_NONE = 0x0,
  NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1,
  NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2,
  NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4,
  NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8
};
typedef enum nvmlVgpuVmCompatibility_t nvmlVgpuVmCompatibility_t;

/* "cuda/bindings/cy_nvml.pxd":460
 *     NVML_VGPU_VM_COMPATIBILITY_LIVE "NVML_VGPU_VM_COMPATIBILITY_LIVE" = 0x8
 * 
 * ctypedef enum nvmlVgpuPgpuCompatibilityLimitCode_t "nvmlVgpuPgpuCompatibilityLimitCode_t":             # <<<<<<<<<<<<<<
 *     NVML_VGPU_COMPATIBILITY_LIMIT_NONE "NVML_VGPU_COMPATIBILITY_LIMIT_NONE" = 0x0
 *     NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER "NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER" = 0x1
*/
/* Reasons limiting vGPU/pGPU compatibility
 * (NVML nvmlVgpuPgpuCompatibilityLimitCode_t).
 * NOTE: bit flags; OTHER uses the top bit (0x80000000), which also forces
 * an underlying type at least 32 bits wide. */
enum nvmlVgpuPgpuCompatibilityLimitCode_t {
  NVML_VGPU_COMPATIBILITY_LIMIT_NONE = 0x0,
  NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1,
  NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2,
  NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4,
  NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000
};
typedef enum nvmlVgpuPgpuCompatibilityLimitCode_t nvmlVgpuPgpuCompatibilityLimitCode_t;

/* "cuda/bindings/cy_nvml.pxd":467
 *     NVML_VGPU_COMPATIBILITY_LIMIT_OTHER "NVML_VGPU_COMPATIBILITY_LIMIT_OTHER" = 0x80000000
 * 
 * ctypedef enum nvmlGpmMetricId_t "nvmlGpmMetricId_t":             # <<<<<<<<<<<<<<
 *     NVML_GPM_METRIC_GRAPHICS_UTIL "NVML_GPM_METRIC_GRAPHICS_UTIL" = 1
 *     NVML_GPM_METRIC_SM_UTIL "NVML_GPM_METRIC_SM_UTIL" = 2
*/
/* GPM (GPU Performance Monitoring) metric identifiers
 * (NVML nvmlGpmMetricId_t). The ID space is deliberately sparse: each
 * metric family occupies a reserved decade/range (utilization 1..13,
 * PCIe 20+, NVDEC 30+, NVJPG 40+, NVOFA 50+, NVLink per-lane 60..97,
 * C2C 0x64.., cache-hit counters 0xA0.., NVENC 0xA6.., per-GR-engine
 * context-switch counters 0xAA..0xD1). Gaps (e.g. 8, 14..19) are reserved
 * upstream — do not compact. MAX (0xD2 = 210) is the end marker. */
enum nvmlGpmMetricId_t {
  /* Engine/compute utilization metrics */
  NVML_GPM_METRIC_GRAPHICS_UTIL = 1,
  NVML_GPM_METRIC_SM_UTIL = 2,
  NVML_GPM_METRIC_SM_OCCUPANCY = 3,
  NVML_GPM_METRIC_INTEGER_UTIL = 4,
  NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5,
  NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6,
  NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7,
  NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9,
  NVML_GPM_METRIC_DRAM_BW_UTIL = 10,
  NVML_GPM_METRIC_FP64_UTIL = 11,
  NVML_GPM_METRIC_FP32_UTIL = 12,
  NVML_GPM_METRIC_FP16_UTIL = 13,
  /* PCIe throughput */
  NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20,
  NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21,
  /* NVDEC decoder engines 0-7 */
  NVML_GPM_METRIC_NVDEC_0_UTIL = 30,
  NVML_GPM_METRIC_NVDEC_1_UTIL = 31,
  NVML_GPM_METRIC_NVDEC_2_UTIL = 32,
  NVML_GPM_METRIC_NVDEC_3_UTIL = 33,
  NVML_GPM_METRIC_NVDEC_4_UTIL = 34,
  NVML_GPM_METRIC_NVDEC_5_UTIL = 35,
  NVML_GPM_METRIC_NVDEC_6_UTIL = 36,
  NVML_GPM_METRIC_NVDEC_7_UTIL = 37,
  /* NVJPG engines 0-7 */
  NVML_GPM_METRIC_NVJPG_0_UTIL = 40,
  NVML_GPM_METRIC_NVJPG_1_UTIL = 41,
  NVML_GPM_METRIC_NVJPG_2_UTIL = 42,
  NVML_GPM_METRIC_NVJPG_3_UTIL = 43,
  NVML_GPM_METRIC_NVJPG_4_UTIL = 44,
  NVML_GPM_METRIC_NVJPG_5_UTIL = 45,
  NVML_GPM_METRIC_NVJPG_6_UTIL = 46,
  NVML_GPM_METRIC_NVJPG_7_UTIL = 47,
  /* Optical flow accelerators */
  NVML_GPM_METRIC_NVOFA_0_UTIL = 50,
  NVML_GPM_METRIC_NVOFA_1_UTIL = 51,
  /* NVLink totals and per-lane (L0-L17) RX/TX throughput */
  NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60,
  NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61,
  NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62,
  NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63,
  NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64,
  NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65,
  NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66,
  NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67,
  NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68,
  NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69,
  NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70,
  NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71,
  NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72,
  NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73,
  NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74,
  NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75,
  NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76,
  NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77,
  NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78,
  NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79,
  NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80,
  NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81,
  NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82,
  NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83,
  NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84,
  NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85,
  NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86,
  NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87,
  NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88,
  NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89,
  NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90,
  NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91,
  NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92,
  NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93,
  NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94,
  NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95,
  NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96,
  NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97,
  /* C2C (chip-to-chip) totals, then per-link (0-13) total/data RX/TX */
  NVML_GPM_METRIC_C2C_TOTAL_TX_PER_SEC = 0x64,
  NVML_GPM_METRIC_C2C_TOTAL_RX_PER_SEC = 0x65,
  NVML_GPM_METRIC_C2C_DATA_TX_PER_SEC = 0x66,
  NVML_GPM_METRIC_C2C_DATA_RX_PER_SEC = 0x67,
  NVML_GPM_METRIC_C2C_LINK0_TOTAL_TX_PER_SEC = 0x68,
  NVML_GPM_METRIC_C2C_LINK0_TOTAL_RX_PER_SEC = 0x69,
  NVML_GPM_METRIC_C2C_LINK0_DATA_TX_PER_SEC = 0x6A,
  NVML_GPM_METRIC_C2C_LINK0_DATA_RX_PER_SEC = 0x6B,
  NVML_GPM_METRIC_C2C_LINK1_TOTAL_TX_PER_SEC = 0x6C,
  NVML_GPM_METRIC_C2C_LINK1_TOTAL_RX_PER_SEC = 0x6D,
  NVML_GPM_METRIC_C2C_LINK1_DATA_TX_PER_SEC = 0x6E,
  NVML_GPM_METRIC_C2C_LINK1_DATA_RX_PER_SEC = 0x6F,
  NVML_GPM_METRIC_C2C_LINK2_TOTAL_TX_PER_SEC = 0x70,
  NVML_GPM_METRIC_C2C_LINK2_TOTAL_RX_PER_SEC = 0x71,
  NVML_GPM_METRIC_C2C_LINK2_DATA_TX_PER_SEC = 0x72,
  NVML_GPM_METRIC_C2C_LINK2_DATA_RX_PER_SEC = 0x73,
  NVML_GPM_METRIC_C2C_LINK3_TOTAL_TX_PER_SEC = 0x74,
  NVML_GPM_METRIC_C2C_LINK3_TOTAL_RX_PER_SEC = 0x75,
  NVML_GPM_METRIC_C2C_LINK3_DATA_TX_PER_SEC = 0x76,
  NVML_GPM_METRIC_C2C_LINK3_DATA_RX_PER_SEC = 0x77,
  NVML_GPM_METRIC_C2C_LINK4_TOTAL_TX_PER_SEC = 0x78,
  NVML_GPM_METRIC_C2C_LINK4_TOTAL_RX_PER_SEC = 0x79,
  NVML_GPM_METRIC_C2C_LINK4_DATA_TX_PER_SEC = 0x7A,
  NVML_GPM_METRIC_C2C_LINK4_DATA_RX_PER_SEC = 0x7B,
  NVML_GPM_METRIC_C2C_LINK5_TOTAL_TX_PER_SEC = 0x7C,
  NVML_GPM_METRIC_C2C_LINK5_TOTAL_RX_PER_SEC = 0x7D,
  NVML_GPM_METRIC_C2C_LINK5_DATA_TX_PER_SEC = 0x7E,
  NVML_GPM_METRIC_C2C_LINK5_DATA_RX_PER_SEC = 0x7F,
  NVML_GPM_METRIC_C2C_LINK6_TOTAL_TX_PER_SEC = 0x80,
  NVML_GPM_METRIC_C2C_LINK6_TOTAL_RX_PER_SEC = 0x81,
  NVML_GPM_METRIC_C2C_LINK6_DATA_TX_PER_SEC = 0x82,
  NVML_GPM_METRIC_C2C_LINK6_DATA_RX_PER_SEC = 0x83,
  NVML_GPM_METRIC_C2C_LINK7_TOTAL_TX_PER_SEC = 0x84,
  NVML_GPM_METRIC_C2C_LINK7_TOTAL_RX_PER_SEC = 0x85,
  NVML_GPM_METRIC_C2C_LINK7_DATA_TX_PER_SEC = 0x86,
  NVML_GPM_METRIC_C2C_LINK7_DATA_RX_PER_SEC = 0x87,
  NVML_GPM_METRIC_C2C_LINK8_TOTAL_TX_PER_SEC = 0x88,
  NVML_GPM_METRIC_C2C_LINK8_TOTAL_RX_PER_SEC = 0x89,
  NVML_GPM_METRIC_C2C_LINK8_DATA_TX_PER_SEC = 0x8A,
  NVML_GPM_METRIC_C2C_LINK8_DATA_RX_PER_SEC = 0x8B,
  NVML_GPM_METRIC_C2C_LINK9_TOTAL_TX_PER_SEC = 0x8C,
  NVML_GPM_METRIC_C2C_LINK9_TOTAL_RX_PER_SEC = 0x8D,
  NVML_GPM_METRIC_C2C_LINK9_DATA_TX_PER_SEC = 0x8E,
  NVML_GPM_METRIC_C2C_LINK9_DATA_RX_PER_SEC = 0x8F,
  NVML_GPM_METRIC_C2C_LINK10_TOTAL_TX_PER_SEC = 0x90,
  NVML_GPM_METRIC_C2C_LINK10_TOTAL_RX_PER_SEC = 0x91,
  NVML_GPM_METRIC_C2C_LINK10_DATA_TX_PER_SEC = 0x92,
  NVML_GPM_METRIC_C2C_LINK10_DATA_RX_PER_SEC = 0x93,
  NVML_GPM_METRIC_C2C_LINK11_TOTAL_TX_PER_SEC = 0x94,
  NVML_GPM_METRIC_C2C_LINK11_TOTAL_RX_PER_SEC = 0x95,
  NVML_GPM_METRIC_C2C_LINK11_DATA_TX_PER_SEC = 0x96,
  NVML_GPM_METRIC_C2C_LINK11_DATA_RX_PER_SEC = 0x97,
  NVML_GPM_METRIC_C2C_LINK12_TOTAL_TX_PER_SEC = 0x98,
  NVML_GPM_METRIC_C2C_LINK12_TOTAL_RX_PER_SEC = 0x99,
  NVML_GPM_METRIC_C2C_LINK12_DATA_TX_PER_SEC = 0x9A,
  NVML_GPM_METRIC_C2C_LINK12_DATA_RX_PER_SEC = 0x9B,
  NVML_GPM_METRIC_C2C_LINK13_TOTAL_TX_PER_SEC = 0x9C,
  NVML_GPM_METRIC_C2C_LINK13_TOTAL_RX_PER_SEC = 0x9D,
  NVML_GPM_METRIC_C2C_LINK13_DATA_TX_PER_SEC = 0x9E,
  NVML_GPM_METRIC_C2C_LINK13_DATA_RX_PER_SEC = 0x9F,
  /* Cache hit/miss counters */
  NVML_GPM_METRIC_HOSTMEM_CACHE_HIT = 0xA0,
  NVML_GPM_METRIC_HOSTMEM_CACHE_MISS = 0xA1,
  NVML_GPM_METRIC_PEERMEM_CACHE_HIT = 0xA2,
  NVML_GPM_METRIC_PEERMEM_CACHE_MISS = 0xA3,
  NVML_GPM_METRIC_DRAM_CACHE_HIT = 0xA4,
  NVML_GPM_METRIC_DRAM_CACHE_MISS = 0xA5,
  /* NVENC encoder engines 0-3 */
  NVML_GPM_METRIC_NVENC_0_UTIL = 0xA6,
  NVML_GPM_METRIC_NVENC_1_UTIL = 0xA7,
  NVML_GPM_METRIC_NVENC_2_UTIL = 0xA8,
  NVML_GPM_METRIC_NVENC_3_UTIL = 0xA9,
  /* Per-GR-engine (GR0-GR7) context-switch counters, 5 metrics each */
  NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ELAPSED = 0xAA,
  NVML_GPM_METRIC_GR0_CTXSW_CYCLES_ACTIVE = 0xAB,
  NVML_GPM_METRIC_GR0_CTXSW_REQUESTS = 0xAC,
  NVML_GPM_METRIC_GR0_CTXSW_CYCLES_PER_REQ = 0xAD,
  NVML_GPM_METRIC_GR0_CTXSW_ACTIVE_PCT = 0xAE,
  NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ELAPSED = 0xAF,
  NVML_GPM_METRIC_GR1_CTXSW_CYCLES_ACTIVE = 0xB0,
  NVML_GPM_METRIC_GR1_CTXSW_REQUESTS = 0xB1,
  NVML_GPM_METRIC_GR1_CTXSW_CYCLES_PER_REQ = 0xB2,
  NVML_GPM_METRIC_GR1_CTXSW_ACTIVE_PCT = 0xB3,
  NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ELAPSED = 0xB4,
  NVML_GPM_METRIC_GR2_CTXSW_CYCLES_ACTIVE = 0xB5,
  NVML_GPM_METRIC_GR2_CTXSW_REQUESTS = 0xB6,
  NVML_GPM_METRIC_GR2_CTXSW_CYCLES_PER_REQ = 0xB7,
  NVML_GPM_METRIC_GR2_CTXSW_ACTIVE_PCT = 0xB8,
  NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ELAPSED = 0xB9,
  NVML_GPM_METRIC_GR3_CTXSW_CYCLES_ACTIVE = 0xBA,
  NVML_GPM_METRIC_GR3_CTXSW_REQUESTS = 0xBB,
  NVML_GPM_METRIC_GR3_CTXSW_CYCLES_PER_REQ = 0xBC,
  NVML_GPM_METRIC_GR3_CTXSW_ACTIVE_PCT = 0xBD,
  NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ELAPSED = 0xBE,
  NVML_GPM_METRIC_GR4_CTXSW_CYCLES_ACTIVE = 0xBF,
  NVML_GPM_METRIC_GR4_CTXSW_REQUESTS = 0xC0,
  NVML_GPM_METRIC_GR4_CTXSW_CYCLES_PER_REQ = 0xC1,
  NVML_GPM_METRIC_GR4_CTXSW_ACTIVE_PCT = 0xC2,
  NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ELAPSED = 0xC3,
  NVML_GPM_METRIC_GR5_CTXSW_CYCLES_ACTIVE = 0xC4,
  NVML_GPM_METRIC_GR5_CTXSW_REQUESTS = 0xC5,
  NVML_GPM_METRIC_GR5_CTXSW_CYCLES_PER_REQ = 0xC6,
  NVML_GPM_METRIC_GR5_CTXSW_ACTIVE_PCT = 0xC7,
  NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ELAPSED = 0xC8,
  NVML_GPM_METRIC_GR6_CTXSW_CYCLES_ACTIVE = 0xC9,
  NVML_GPM_METRIC_GR6_CTXSW_REQUESTS = 0xCA,
  NVML_GPM_METRIC_GR6_CTXSW_CYCLES_PER_REQ = 0xCB,
  NVML_GPM_METRIC_GR6_CTXSW_ACTIVE_PCT = 0xCC,
  NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ELAPSED = 0xCD,
  NVML_GPM_METRIC_GR7_CTXSW_CYCLES_ACTIVE = 0xCE,
  NVML_GPM_METRIC_GR7_CTXSW_REQUESTS = 0xCF,
  NVML_GPM_METRIC_GR7_CTXSW_CYCLES_PER_REQ = 0xD0,
  NVML_GPM_METRIC_GR7_CTXSW_ACTIVE_PCT = 0xD1,
  NVML_GPM_METRIC_MAX = 0xD2
};
typedef enum nvmlGpmMetricId_t nvmlGpmMetricId_t;

/* "cuda/bindings/cy_nvml.pxd":650
 *     NVML_GPM_METRIC_MAX "NVML_GPM_METRIC_MAX" = 210
 * 
 * ctypedef enum nvmlPowerProfileType_t "nvmlPowerProfileType_t":             # <<<<<<<<<<<<<<
 *     NVML_POWER_PROFILE_MAX_P "NVML_POWER_PROFILE_MAX_P" = 0
 *     NVML_POWER_PROFILE_MAX_Q "NVML_POWER_PROFILE_MAX_Q" = 1
*/
/* Workload power-profile selector; values mirror the `ctypedef enum` at
 * cy_nvml.pxd:668 (NVML_POWER_PROFILE_MAX is the one-past-last sentinel). */
enum nvmlPowerProfileType_t {
  NVML_POWER_PROFILE_MAX_P = 0,
  NVML_POWER_PROFILE_MAX_Q = 1,
  NVML_POWER_PROFILE_COMPUTE = 2,
  NVML_POWER_PROFILE_MEMORY_BOUND = 3,
  NVML_POWER_PROFILE_NETWORK = 4,
  NVML_POWER_PROFILE_BALANCED = 5,
  NVML_POWER_PROFILE_LLM_INFERENCE = 6,
  NVML_POWER_PROFILE_LLM_TRAINING = 7,
  NVML_POWER_PROFILE_RBM = 8,
  NVML_POWER_PROFILE_DCPCIE = 9,
  NVML_POWER_PROFILE_HMMA_SPARSE = 10,
  NVML_POWER_PROFILE_HMMA_DENSE = 11,
  NVML_POWER_PROFILE_SYNC_BALANCED = 12,
  NVML_POWER_PROFILE_HPC = 13,
  NVML_POWER_PROFILE_MIG = 14,
  NVML_POWER_PROFILE_MAX = 15
};
typedef enum nvmlPowerProfileType_t nvmlPowerProfileType_t;

/* "cuda/bindings/cy_nvml.pxd":668
 *     NVML_POWER_PROFILE_MAX "NVML_POWER_PROFILE_MAX" = 15
 * 
 * ctypedef enum nvmlDeviceAddressingModeType_t "nvmlDeviceAddressingModeType_t":             # <<<<<<<<<<<<<<
 *     NVML_DEVICE_ADDRESSING_MODE_NONE "NVML_DEVICE_ADDRESSING_MODE_NONE" = 0
 *     NVML_DEVICE_ADDRESSING_MODE_HMM "NVML_DEVICE_ADDRESSING_MODE_HMM" = 1
*/
/* Device addressing mode (none / HMM / ATS); mirrors the `ctypedef enum`
 * at cy_nvml.pxd:668ff. */
enum nvmlDeviceAddressingModeType_t {
  NVML_DEVICE_ADDRESSING_MODE_NONE = 0,
  NVML_DEVICE_ADDRESSING_MODE_HMM = 1,
  NVML_DEVICE_ADDRESSING_MODE_ATS = 2
};
typedef enum nvmlDeviceAddressingModeType_t nvmlDeviceAddressingModeType_t;

/* "cuda/bindings/cy_nvml.pxd":675
 * 
 * # types
 * ctypedef struct nvmlPciInfoExt_v1_t 'nvmlPciInfoExt_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int domain
*/
/* Extended PCI identification (v1): numeric domain/bus/device IDs, PCI
 * device/subsystem IDs, class codes, and the textual bus id.
 * Layout is expected to match NVML's nvmlPciInfoExt_v1_t — do not edit. */
struct nvmlPciInfoExt_v1_t {
  unsigned int version;
  unsigned int domain;
  unsigned int bus;
  unsigned int device;
  unsigned int pciDeviceId;
  unsigned int pciSubSystemId;
  unsigned int baseClass;
  unsigned int subClass;
  char busId[32];
};

/* "cuda/bindings/cy_nvml.pxd":686
 *     char busId[32]
 * 
 * ctypedef struct nvmlCoolerInfo_v1_t 'nvmlCoolerInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int index
*/
/* Cooler description (v1): cooler index plus its control-signal type and
 * cooling target. Generated from the `ctypedef struct` at cy_nvml.pxd:686. */
struct nvmlCoolerInfo_v1_t {
  unsigned int version;
  unsigned int index;
  nvmlCoolerControl_t signalType;
  nvmlCoolerTarget_t target;
};

/* "cuda/bindings/cy_nvml.pxd":692
 *     nvmlCoolerTarget_t target
 * 
 * ctypedef struct nvmlDramEncryptionInfo_v1_t 'nvmlDramEncryptionInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlEnableState_t encryptionState
*/
/* DRAM encryption state (v1); mirrors cy_nvml.pxd:692. */
struct nvmlDramEncryptionInfo_v1_t {
  unsigned int version;
  nvmlEnableState_t encryptionState;
};

/* "cuda/bindings/cy_nvml.pxd":696
 *     nvmlEnableState_t encryptionState
 * 
 * ctypedef struct nvmlMarginTemperature_v1_t 'nvmlMarginTemperature_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     int marginTemperature
*/
/* Margin temperature (v1); signed, so it can go negative. Mirrors
 * cy_nvml.pxd:696. */
struct nvmlMarginTemperature_v1_t {
  unsigned int version;
  int marginTemperature;
};

/* "cuda/bindings/cy_nvml.pxd":700
 *     int marginTemperature
 * 
 * ctypedef struct nvmlClockOffset_v1_t 'nvmlClockOffset_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlClockType_t type
*/
/* Clock offset (v1) for a given clock type and P-state: current offset in
 * MHz plus its allowed min/max range. Mirrors cy_nvml.pxd:700. */
struct nvmlClockOffset_v1_t {
  unsigned int version;
  nvmlClockType_t type;
  nvmlPstates_t pstate;
  int clockOffsetMHz;
  int minClockOffsetMHz;
  int maxClockOffsetMHz;
};

/* "cuda/bindings/cy_nvml.pxd":708
 *     int maxClockOffsetMHz
 * 
 * ctypedef struct nvmlFanSpeedInfo_v1_t 'nvmlFanSpeedInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int fan
*/
/* Per-fan speed report (v1); mirrors cy_nvml.pxd:708. */
struct nvmlFanSpeedInfo_v1_t {
  unsigned int version;
  unsigned int fan;
  unsigned int speed;
};

/* "cuda/bindings/cy_nvml.pxd":713
 *     unsigned int speed
 * 
 * ctypedef struct nvmlDevicePerfModes_v1_t 'nvmlDevicePerfModes_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     char str[2048]
*/
/* Device performance modes (v1), returned as a fixed 2 KiB text buffer.
 * Mirrors cy_nvml.pxd:713. */
struct nvmlDevicePerfModes_v1_t {
  unsigned int version;
  char str[2048];
};

/* "cuda/bindings/cy_nvml.pxd":717
 *     char str[2048]
 * 
 * ctypedef struct nvmlDeviceCurrentClockFreqs_v1_t 'nvmlDeviceCurrentClockFreqs_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     char str[2048]
*/
/* Current clock frequencies (v1), returned as a fixed 2 KiB text buffer.
 * Mirrors cy_nvml.pxd:717. */
struct nvmlDeviceCurrentClockFreqs_v1_t {
  unsigned int version;
  char str[2048];
};

/* "cuda/bindings/cy_nvml.pxd":721
 *     char str[2048]
 * 
 * ctypedef struct nvmlEccSramErrorStatus_v1_t 'nvmlEccSramErrorStatus_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long aggregateUncParity
*/
/* SRAM ECC error counters (v1): aggregate and volatile uncorrectable
 * (parity / SEC-DED) and correctable counts, plus per-unit uncorrectable
 * buckets and a threshold-exceeded flag.
 * PY_LONG_LONG is CPython's spelling of `long long`; the .pxd declares
 * these fields as `unsigned long long`. Mirrors cy_nvml.pxd:721. */
struct nvmlEccSramErrorStatus_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG aggregateUncParity;
  unsigned PY_LONG_LONG aggregateUncSecDed;
  unsigned PY_LONG_LONG aggregateCor;
  unsigned PY_LONG_LONG volatileUncParity;
  unsigned PY_LONG_LONG volatileUncSecDed;
  unsigned PY_LONG_LONG volatileCor;
  unsigned PY_LONG_LONG aggregateUncBucketL2;
  unsigned PY_LONG_LONG aggregateUncBucketSm;
  unsigned PY_LONG_LONG aggregateUncBucketPcie;
  unsigned PY_LONG_LONG aggregateUncBucketMcu;
  unsigned PY_LONG_LONG aggregateUncBucketOther;
  unsigned int bThresholdExceeded;
};

/* "cuda/bindings/cy_nvml.pxd":736
 *     unsigned int bThresholdExceeded
 * 
 * ctypedef struct nvmlPlatformInfo_v2_t 'nvmlPlatformInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char ibGuid[16]
*/
/* Platform topology info (v2): IB GUID, chassis serial, and slot/tray/host
 * location bytes. Mirrors cy_nvml.pxd:736. */
struct nvmlPlatformInfo_v2_t {
  unsigned int version;
  unsigned char ibGuid[16];
  unsigned char chassisSerialNumber[16];
  unsigned char slotNumber;
  unsigned char trayIndex;
  unsigned char hostId;
  unsigned char peerType;
  unsigned char moduleId;
};

/* "cuda/bindings/cy_nvml.pxd":753
 * ctypedef unsigned int nvmlVgpuTypeId_t 'nvmlVgpuTypeId_t'
 * ctypedef unsigned int nvmlVgpuInstance_t 'nvmlVgpuInstance_t'
 * ctypedef struct nvmlVgpuHeterogeneousMode_v1_t 'nvmlVgpuHeterogeneousMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int mode
*/
/* vGPU heterogeneous-mode flag (v1); mirrors cy_nvml.pxd:753. */
struct nvmlVgpuHeterogeneousMode_v1_t {
  unsigned int version;
  unsigned int mode;
};

/* "cuda/bindings/cy_nvml.pxd":757
 *     unsigned int mode
 * 
 * ctypedef struct nvmlVgpuPlacementId_v1_t 'nvmlVgpuPlacementId_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int placementId
*/
/* Single vGPU placement id (v1); mirrors cy_nvml.pxd:757. */
struct nvmlVgpuPlacementId_v1_t {
  unsigned int version;
  unsigned int placementId;
};

/* "cuda/bindings/cy_nvml.pxd":761
 *     unsigned int placementId
 * 
 * ctypedef struct nvmlVgpuPlacementList_v2_t 'nvmlVgpuPlacementList_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int placementSize
*/
/* vGPU placement list (v2): caller-provided `placementIds` array of
 * `count` entries (v2 adds `mode` over v1). Mirrors cy_nvml.pxd:761. */
struct nvmlVgpuPlacementList_v2_t {
  unsigned int version;
  unsigned int placementSize;
  unsigned int count;
  unsigned int *placementIds;
  unsigned int mode;
};

/* "cuda/bindings/cy_nvml.pxd":768
 *     unsigned int mode
 * 
 * ctypedef struct nvmlVgpuTypeBar1Info_v1_t 'nvmlVgpuTypeBar1Info_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long bar1Size
*/
/* BAR1 size for a vGPU type (v1); mirrors cy_nvml.pxd:768. */
struct nvmlVgpuTypeBar1Info_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG bar1Size;
};

/* "cuda/bindings/cy_nvml.pxd":772
 *     unsigned long long bar1Size
 * 
 * ctypedef struct nvmlVgpuRuntimeState_v1_t 'nvmlVgpuRuntimeState_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long size
*/
/* vGPU runtime-state size (v1); mirrors cy_nvml.pxd:772. */
struct nvmlVgpuRuntimeState_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG size;
};

/* "cuda/bindings/cy_nvml.pxd":776
 *     unsigned long long size
 * 
 * ctypedef struct nvmlSystemConfComputeSettings_v1_t 'nvmlSystemConfComputeSettings_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int environment
*/
/* System-wide confidential-computing settings (v1); mirrors
 * cy_nvml.pxd:776. */
struct nvmlSystemConfComputeSettings_v1_t {
  unsigned int version;
  unsigned int environment;
  unsigned int ccFeature;
  unsigned int devToolsMode;
  unsigned int multiGpuMode;
};

/* "cuda/bindings/cy_nvml.pxd":783
 *     unsigned int multiGpuMode
 * 
 * ctypedef struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t 'nvmlConfComputeSetKeyRotationThresholdInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long maxAttackerAdvantage
*/
/* Setter payload for the CC key-rotation threshold (v1); mirrors
 * cy_nvml.pxd:783. */
struct nvmlConfComputeSetKeyRotationThresholdInfo_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG maxAttackerAdvantage;
};

/* "cuda/bindings/cy_nvml.pxd":787
 *     unsigned long long maxAttackerAdvantage
 * 
 * ctypedef struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t 'nvmlConfComputeGetKeyRotationThresholdInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long attackerAdvantage
*/
/* Getter payload for the CC key-rotation threshold (v1); mirrors
 * cy_nvml.pxd:787. */
struct nvmlConfComputeGetKeyRotationThresholdInfo_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG attackerAdvantage;
};

/* "cuda/bindings/cy_nvml.pxd":792
 * 
 * ctypedef unsigned char nvmlGpuFabricState_t 'nvmlGpuFabricState_t'
 * ctypedef struct nvmlSystemDriverBranchInfo_v1_t 'nvmlSystemDriverBranchInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     char branch[80]
*/
/* Driver branch name (v1), fixed 80-char buffer; mirrors cy_nvml.pxd:792. */
struct nvmlSystemDriverBranchInfo_v1_t {
  unsigned int version;
  char branch[80];
};

/* "cuda/bindings/cy_nvml.pxd":797
 * 
 * ctypedef unsigned int nvmlAffinityScope_t 'nvmlAffinityScope_t'
 * ctypedef struct nvmlTemperature_v1_t 'nvmlTemperature_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlTemperatureSensors_t sensorType
*/
/* Temperature reading (v1) for one sensor type; signed value. Mirrors
 * cy_nvml.pxd:797. */
struct nvmlTemperature_v1_t {
  unsigned int version;
  nvmlTemperatureSensors_t sensorType;
  int temperature;
};

/* "cuda/bindings/cy_nvml.pxd":802
 *     int temperature
 * 
 * ctypedef struct nvmlNvlinkSupportedBwModes_v1_t 'nvmlNvlinkSupportedBwModes_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char bwModes[23]
*/
/* Supported NVLink bandwidth modes (v1): up to 23 mode bytes, with
 * `totalBwModes` giving the valid count. Mirrors cy_nvml.pxd:802. */
struct nvmlNvlinkSupportedBwModes_v1_t {
  unsigned int version;
  unsigned char bwModes[23];
  unsigned char totalBwModes;
};

/* "cuda/bindings/cy_nvml.pxd":807
 *     unsigned char totalBwModes
 * 
 * ctypedef struct nvmlNvlinkGetBwMode_v1_t 'nvmlNvlinkGetBwMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int bIsBest
*/
/* Current NVLink bandwidth mode query result (v1); mirrors
 * cy_nvml.pxd:807. */
struct nvmlNvlinkGetBwMode_v1_t {
  unsigned int version;
  unsigned int bIsBest;
  unsigned char bwMode;
};

/* "cuda/bindings/cy_nvml.pxd":812
 *     unsigned char bwMode
 * 
 * ctypedef struct nvmlNvlinkSetBwMode_v1_t 'nvmlNvlinkSetBwMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int bSetBest
*/
/* NVLink bandwidth mode setter payload (v1); mirrors cy_nvml.pxd:812. */
struct nvmlNvlinkSetBwMode_v1_t {
  unsigned int version;
  unsigned int bSetBest;
  unsigned char bwMode;
};

/* "cuda/bindings/cy_nvml.pxd":817
 *     unsigned char bwMode
 * 
 * ctypedef struct nvmlDeviceCapabilities_v1_t 'nvmlDeviceCapabilities_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int capMask
*/
/* Device capability bitmask (v1); mirrors cy_nvml.pxd:817. */
struct nvmlDeviceCapabilities_v1_t {
  unsigned int version;
  unsigned int capMask;
};

/* "cuda/bindings/cy_nvml.pxd":821
 *     unsigned int capMask
 * 
 * ctypedef struct nvmlPowerSmoothingProfile_v1_t 'nvmlPowerSmoothingProfile_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int profileId
*/
/* Power-smoothing profile parameter (v1): one (profileId, paramId, value)
 * triple. Mirrors cy_nvml.pxd:821. */
struct nvmlPowerSmoothingProfile_v1_t {
  unsigned int version;
  unsigned int profileId;
  unsigned int paramId;
  double value;
};

/* "cuda/bindings/cy_nvml.pxd":827
 *     double value
 * 
 * ctypedef struct nvmlPowerSmoothingState_v1_t 'nvmlPowerSmoothingState_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlEnableState_t state
*/
/* Power-smoothing enable state (v1); mirrors cy_nvml.pxd:827. */
struct nvmlPowerSmoothingState_v1_t {
  unsigned int version;
  nvmlEnableState_t state;
};

/* "cuda/bindings/cy_nvml.pxd":831
 *     nvmlEnableState_t state
 * 
 * ctypedef struct nvmlDeviceAddressingMode_v1_t 'nvmlDeviceAddressingMode_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int value
*/
/* Device addressing-mode value (v1); see nvmlDeviceAddressingModeType_t
 * for the expected values. Mirrors cy_nvml.pxd:831. */
struct nvmlDeviceAddressingMode_v1_t {
  unsigned int version;
  unsigned int value;
};

/* "cuda/bindings/cy_nvml.pxd":835
 *     unsigned int value
 * 
 * ctypedef struct nvmlRepairStatus_v1_t 'nvmlRepairStatus_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int bChannelRepairPending
*/
/* Pending repair flags (v1): channel and TPC repair pending bits.
 * Mirrors cy_nvml.pxd:835. */
struct nvmlRepairStatus_v1_t {
  unsigned int version;
  unsigned int bChannelRepairPending;
  unsigned int bTpcRepairPending;
};

/* "cuda/bindings/cy_nvml.pxd":840
 *     unsigned int bTpcRepairPending
 * 
 * ctypedef struct nvmlPdi_v1_t 'nvmlPdi_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long value
*/
/* 64-bit PDI value (v1); mirrors cy_nvml.pxd:840. */
struct nvmlPdi_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG value;
};

/* "cuda/bindings/cy_nvml.pxd":844
 *     unsigned long long value
 * 
 * ctypedef void* nvmlDevice_t 'nvmlDevice_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
*/
typedef void *nvmlDevice_t; /* opaque pointer alias (from `ctypedef void*` in cy_nvml.pxd:844) */

/* "cuda/bindings/cy_nvml.pxd":845
 * 
 * ctypedef void* nvmlDevice_t 'nvmlDevice_t'
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
*/
typedef void *nvmlGpuInstance_t; /* opaque pointer alias (cy_nvml.pxd:845) */

/* "cuda/bindings/cy_nvml.pxd":846
 * ctypedef void* nvmlDevice_t 'nvmlDevice_t'
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
*/
typedef void *nvmlUnit_t; /* opaque pointer alias (cy_nvml.pxd:846) */

/* "cuda/bindings/cy_nvml.pxd":847
 * ctypedef void* nvmlGpuInstance_t 'nvmlGpuInstance_t'
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
*/
typedef void *nvmlEventSet_t; /* opaque pointer alias (cy_nvml.pxd:847) */

/* "cuda/bindings/cy_nvml.pxd":848
 * ctypedef void* nvmlUnit_t 'nvmlUnit_t'
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'
*/
typedef void *nvmlSystemEventSet_t; /* opaque pointer alias (cy_nvml.pxd:848) */

/* "cuda/bindings/cy_nvml.pxd":849
 * ctypedef void* nvmlEventSet_t 'nvmlEventSet_t'
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'
 * ctypedef struct nvmlPciInfo_t 'nvmlPciInfo_t':
*/
typedef void *nvmlComputeInstance_t; /* opaque pointer alias (cy_nvml.pxd:849) */

/* "cuda/bindings/cy_nvml.pxd":850
 * ctypedef void* nvmlSystemEventSet_t 'nvmlSystemEventSet_t'
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlPciInfo_t 'nvmlPciInfo_t':
 *     char busIdLegacy[16]
*/
typedef void *nvmlGpmSample_t; /* opaque pointer alias (cy_nvml.pxd:850) */

/* "cuda/bindings/cy_nvml.pxd":851
 * ctypedef void* nvmlComputeInstance_t 'nvmlComputeInstance_t'
 * ctypedef void* nvmlGpmSample_t 'nvmlGpmSample_t'
 * ctypedef struct nvmlPciInfo_t 'nvmlPciInfo_t':             # <<<<<<<<<<<<<<
 *     char busIdLegacy[16]
 *     unsigned int domain
*/
/* Classic PCI info: legacy 16-char bus id, numeric IDs, and the longer
 * 32-char bus id. Layout must match NVML's nvmlPciInfo_t (cy_nvml.pxd:851). */
struct nvmlPciInfo_t {
  char busIdLegacy[16];
  unsigned int domain;
  unsigned int bus;
  unsigned int device;
  unsigned int pciDeviceId;
  unsigned int pciSubSystemId;
  char busId[32];
};

/* "cuda/bindings/cy_nvml.pxd":860
 *     char busId[32]
 * 
 * ctypedef struct nvmlEccErrorCounts_t 'nvmlEccErrorCounts_t':             # <<<<<<<<<<<<<<
 *     unsigned long long l1Cache
 *     unsigned long long l2Cache
*/
/* ECC error counts broken down by memory unit; mirrors cy_nvml.pxd:860. */
struct nvmlEccErrorCounts_t {
  unsigned PY_LONG_LONG l1Cache;
  unsigned PY_LONG_LONG l2Cache;
  unsigned PY_LONG_LONG deviceMemory;
  unsigned PY_LONG_LONG registerFile;
};

/* "cuda/bindings/cy_nvml.pxd":866
 *     unsigned long long registerFile
 * 
 * ctypedef struct nvmlUtilization_t 'nvmlUtilization_t':             # <<<<<<<<<<<<<<
 *     unsigned int gpu
 *     unsigned int memory
*/
/* GPU / memory utilization pair; mirrors cy_nvml.pxd:866. */
struct nvmlUtilization_t {
  unsigned int gpu;
  unsigned int memory;
};

/* "cuda/bindings/cy_nvml.pxd":870
 *     unsigned int memory
 * 
 * ctypedef struct nvmlMemory_t 'nvmlMemory_t':             # <<<<<<<<<<<<<<
 *     unsigned long long total
 *     unsigned long long free
*/
/* FB memory totals (v1 layout, no version field); mirrors cy_nvml.pxd:870. */
struct nvmlMemory_t {
  unsigned PY_LONG_LONG total;
  unsigned PY_LONG_LONG free;
  unsigned PY_LONG_LONG used;
};

/* "cuda/bindings/cy_nvml.pxd":875
 *     unsigned long long used
 * 
 * ctypedef struct nvmlMemory_v2_t 'nvmlMemory_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long total
*/
/* FB memory totals (v2): adds `version` and `reserved` over nvmlMemory_t.
 * Mirrors cy_nvml.pxd:875. */
struct nvmlMemory_v2_t {
  unsigned int version;
  unsigned PY_LONG_LONG total;
  unsigned PY_LONG_LONG reserved;
  unsigned PY_LONG_LONG free;
  unsigned PY_LONG_LONG used;
};

/* "cuda/bindings/cy_nvml.pxd":882
 *     unsigned long long used
 * 
 * ctypedef struct nvmlBAR1Memory_t 'nvmlBAR1Memory_t':             # <<<<<<<<<<<<<<
 *     unsigned long long bar1Total
 *     unsigned long long bar1Free
*/
/* BAR1 memory totals; mirrors cy_nvml.pxd:882. */
struct nvmlBAR1Memory_t {
  unsigned PY_LONG_LONG bar1Total;
  unsigned PY_LONG_LONG bar1Free;
  unsigned PY_LONG_LONG bar1Used;
};

/* "cuda/bindings/cy_nvml.pxd":887
 *     unsigned long long bar1Used
 * 
 * ctypedef struct nvmlProcessInfo_v1_t 'nvmlProcessInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Per-process GPU memory usage (v1); mirrors cy_nvml.pxd:887. */
struct nvmlProcessInfo_v1_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
};

/* "cuda/bindings/cy_nvml.pxd":891
 *     unsigned long long usedGpuMemory
 * 
 * ctypedef struct nvmlProcessInfo_v2_t 'nvmlProcessInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Per-process GPU memory usage (v2): adds MIG GPU/compute instance ids
 * over v1. Mirrors cy_nvml.pxd:891. */
struct nvmlProcessInfo_v2_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
};

/* "cuda/bindings/cy_nvml.pxd":897
 *     unsigned int computeInstanceId
 * 
 * ctypedef struct nvmlProcessInfo_t 'nvmlProcessInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Unversioned process-info alias; same field layout as
 * nvmlProcessInfo_v2_t. Mirrors cy_nvml.pxd:897. */
struct nvmlProcessInfo_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
};

/* "cuda/bindings/cy_nvml.pxd":903
 *     unsigned int computeInstanceId
 * 
 * ctypedef struct nvmlProcessDetail_v1_t 'nvmlProcessDetail_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long usedGpuMemory
*/
/* Process detail (v1): nvmlProcessInfo_t fields plus CC-protected memory
 * usage. Mirrors cy_nvml.pxd:903. */
struct nvmlProcessDetail_v1_t {
  unsigned int pid;
  unsigned PY_LONG_LONG usedGpuMemory;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
  unsigned PY_LONG_LONG usedGpuCcProtectedMemory;
};

/* "cuda/bindings/cy_nvml.pxd":910
 *     unsigned long long usedGpuCcProtectedMemory
 * 
 * ctypedef struct nvmlDeviceAttributes_t 'nvmlDeviceAttributes_t':             # <<<<<<<<<<<<<<
 *     unsigned int multiprocessorCount
 *     unsigned int sharedCopyEngineCount
*/
/* Device (MIG instance) attributes: SM count, shared engine counts, slice
 * counts, and memory size in MiB. Mirrors cy_nvml.pxd:910. */
struct nvmlDeviceAttributes_t {
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
  unsigned int gpuInstanceSliceCount;
  unsigned int computeInstanceSliceCount;
  unsigned PY_LONG_LONG memorySizeMB;
};

/* "cuda/bindings/cy_nvml.pxd":921
 *     unsigned long long memorySizeMB
 * 
 * ctypedef struct nvmlC2cModeInfo_v1_t 'nvmlC2cModeInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int isC2cEnabled
 * 
*/
/* Chip-to-chip (C2C) enable flag (v1); mirrors cy_nvml.pxd:921. */
struct nvmlC2cModeInfo_v1_t {
  unsigned int isC2cEnabled;
};

/* "cuda/bindings/cy_nvml.pxd":924
 *     unsigned int isC2cEnabled
 * 
 * ctypedef struct nvmlRowRemapperHistogramValues_t 'nvmlRowRemapperHistogramValues_t':             # <<<<<<<<<<<<<<
 *     unsigned int max
 *     unsigned int high
*/
/* Row-remapper availability histogram buckets (max..none); mirrors
 * cy_nvml.pxd:924. */
struct nvmlRowRemapperHistogramValues_t {
  unsigned int max;
  unsigned int high;
  unsigned int partial;
  unsigned int low;
  unsigned int none;
};

/* "cuda/bindings/cy_nvml.pxd":931
 *     unsigned int none
 * 
 * ctypedef struct nvmlNvLinkUtilizationControl_t 'nvmlNvLinkUtilizationControl_t':             # <<<<<<<<<<<<<<
 *     nvmlNvLinkUtilizationCountUnits_t units
 *     nvmlNvLinkUtilizationCountPktTypes_t pktfilter
*/
/* NVLink utilization-counter control: count units + packet filter.
 * Mirrors cy_nvml.pxd:931. */
struct nvmlNvLinkUtilizationControl_t {
  nvmlNvLinkUtilizationCountUnits_t units;
  nvmlNvLinkUtilizationCountPktTypes_t pktfilter;
};

/* "cuda/bindings/cy_nvml.pxd":935
 *     nvmlNvLinkUtilizationCountPktTypes_t pktfilter
 * 
 * ctypedef struct nvmlBridgeChipInfo_t 'nvmlBridgeChipInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlBridgeChipType_t type
 *     unsigned int fwVersion
*/
/* Bridge chip type and firmware version; mirrors cy_nvml.pxd:935. */
struct nvmlBridgeChipInfo_t {
  nvmlBridgeChipType_t type;
  unsigned int fwVersion;
};

/* "cuda/bindings/cy_nvml.pxd":939
 *     unsigned int fwVersion
 * 
 * ctypedef union nvmlValue_t 'nvmlValue_t':             # <<<<<<<<<<<<<<
 *     double dVal
 *     int siVal
*/
/* Tagged-by-convention value union used by sampling/field-value APIs;
 * the active member is chosen by an accompanying value-type enum
 * elsewhere. Mirrors cy_nvml.pxd:939. */
union nvmlValue_t {
  double dVal;
  int siVal;
  unsigned int uiVal;
  unsigned long ulVal;
  unsigned PY_LONG_LONG ullVal;
  PY_LONG_LONG sllVal;
  unsigned short usVal;
};

/* "cuda/bindings/cy_nvml.pxd":948
 *     unsigned short usVal
 * 
 * ctypedef struct nvmlViolationTime_t 'nvmlViolationTime_t':             # <<<<<<<<<<<<<<
 *     unsigned long long referenceTime
 *     unsigned long long violationTime
*/
/* Violation-time sample: reference timestamp + accumulated violation
 * time. Mirrors cy_nvml.pxd:948. */
struct nvmlViolationTime_t {
  unsigned PY_LONG_LONG referenceTime;
  unsigned PY_LONG_LONG violationTime;
};

/* "cuda/bindings/cy_nvml.pxd":952
 *     unsigned long long violationTime
 * 
 * ctypedef struct _anon_pod0 '_anon_pod0':             # <<<<<<<<<<<<<<
 *     nvmlThermalController_t controller
 *     int defaultMinTemp
*/
/* Cython-generated POD for an anonymous thermal-sensor record
 * (controller, default min/max, current temperature, target); see
 * cy_nvml.pxd:952. Temperatures are signed. */
struct _anon_pod0 {
  nvmlThermalController_t controller;
  int defaultMinTemp;
  int defaultMaxTemp;
  int currentTemp;
  nvmlThermalTarget_t target;
};

/* "cuda/bindings/cy_nvml.pxd":959
 *     nvmlThermalTarget_t target
 * 
 * ctypedef union nvmlUUIDValue_t 'nvmlUUIDValue_t':             # <<<<<<<<<<<<<<
 *     char str[41]
 *     unsigned char bytes[16]
*/
/* UUID as either a 41-char string (incl. NUL) or 16 raw bytes; mirrors
 * cy_nvml.pxd:959. */
union nvmlUUIDValue_t {
  char str[41];
  unsigned char bytes[16];
};

/* "cuda/bindings/cy_nvml.pxd":963
 *     unsigned char bytes[16]
 * 
 * ctypedef struct nvmlClkMonFaultInfo_t 'nvmlClkMonFaultInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int clkApiDomain
 *     unsigned int clkDomainFaultMask
*/
/* Clock-monitor fault info for one clock domain; mirrors cy_nvml.pxd:963. */
struct nvmlClkMonFaultInfo_t {
  unsigned int clkApiDomain;
  unsigned int clkDomainFaultMask;
};

/* "cuda/bindings/cy_nvml.pxd":967
 *     unsigned int clkDomainFaultMask
 * 
 * ctypedef struct nvmlProcessUtilizationSample_t 'nvmlProcessUtilizationSample_t':             # <<<<<<<<<<<<<<
 *     unsigned int pid
 *     unsigned long long timeStamp
*/
/* Per-process utilization sample (SM/mem/enc/dec) at `timeStamp`;
 * mirrors cy_nvml.pxd:967. */
struct nvmlProcessUtilizationSample_t {
  unsigned int pid;
  unsigned PY_LONG_LONG timeStamp;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
};

/* "cuda/bindings/cy_nvml.pxd":975
 *     unsigned int decUtil
 * 
 * ctypedef struct nvmlProcessUtilizationInfo_v1_t 'nvmlProcessUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timeStamp
 *     unsigned int pid
*/
/* Per-process utilization (v1): sample fields plus JPEG and OFA engine
 * utilization; note `timeStamp` precedes `pid` here, unlike
 * nvmlProcessUtilizationSample_t. Mirrors cy_nvml.pxd:975. */
struct nvmlProcessUtilizationInfo_v1_t {
  unsigned PY_LONG_LONG timeStamp;
  unsigned int pid;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
  unsigned int jpgUtil;
  unsigned int ofaUtil;
};

/* "cuda/bindings/cy_nvml.pxd":985
 *     unsigned int ofaUtil
 * 
 * ctypedef struct nvmlPlatformInfo_v1_t 'nvmlPlatformInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char ibGuid[16]
*/
/* Platform topology info (v1); superseded field naming compared with
 * nvmlPlatformInfo_v2_t (rackGuid vs chassisSerialNumber, etc.).
 * Mirrors cy_nvml.pxd:985. */
struct nvmlPlatformInfo_v1_t {
  unsigned int version;
  unsigned char ibGuid[16];
  unsigned char rackGuid[16];
  unsigned char chassisPhysicalSlotNumber;
  unsigned char computeSlotIndex;
  unsigned char nodeIndex;
  unsigned char peerType;
  unsigned char moduleId;
};

/* "cuda/bindings/cy_nvml.pxd":995
 *     unsigned char moduleId
 * 
 * ctypedef struct _anon_pod1 '_anon_pod1':             # <<<<<<<<<<<<<<
 *     unsigned int bIsPresent
 *     unsigned int percentage
*/
/* Cython-generated POD for an anonymous record with presence flag,
 * percentage, and inc/dec thresholds; see cy_nvml.pxd:995. */
struct _anon_pod1 {
  unsigned int bIsPresent;
  unsigned int percentage;
  unsigned int incThreshold;
  unsigned int decThreshold;
};

/* "cuda/bindings/cy_nvml.pxd":1001
 *     unsigned int decThreshold
 * 
 * ctypedef struct nvmlVgpuPlacementList_v1_t 'nvmlVgpuPlacementList_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int placementSize
*/
/* vGPU placement list (v1): caller-provided `placementIds` array of
 * `count` entries (v2 adds a `mode` field). Mirrors cy_nvml.pxd:1001. */
struct nvmlVgpuPlacementList_v1_t {
  unsigned int version;
  unsigned int placementSize;
  unsigned int count;
  unsigned int *placementIds;
};

/* "cuda/bindings/cy_nvml.pxd":1007
 *     unsigned int* placementIds
 * 
 * ctypedef struct _anon_pod2 '_anon_pod2':             # <<<<<<<<<<<<<<
 *     unsigned int avgFactor
 *     unsigned int timeslice
*/
/* Cython-generated POD (avgFactor + timeslice) for an anonymous vGPU
 * scheduler sub-struct; see cy_nvml.pxd:1007. */
struct _anon_pod2 {
  unsigned int avgFactor;
  unsigned int timeslice;
};

/* "cuda/bindings/cy_nvml.pxd":1011
 *     unsigned int timeslice
 * 
 * ctypedef struct _anon_pod3 '_anon_pod3':             # <<<<<<<<<<<<<<
 *     unsigned int timeslice
 * 
*/
/* Cython-generated POD (timeslice only) for an anonymous vGPU scheduler
 * sub-struct; see cy_nvml.pxd:1011. */
struct _anon_pod3 {
  unsigned int timeslice;
};

/* "cuda/bindings/cy_nvml.pxd":1014
 *     unsigned int timeslice
 * 
 * ctypedef struct nvmlVgpuSchedulerLogEntry_t 'nvmlVgpuSchedulerLogEntry_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timestamp
 *     unsigned long long timeRunTotal
*/
/* One vGPU scheduler log entry: timestamps/runtimes, runlist id, target
 * timeslice, and cumulative preemption time. Mirrors cy_nvml.pxd:1014. */
struct nvmlVgpuSchedulerLogEntry_t {
  unsigned PY_LONG_LONG timestamp;
  unsigned PY_LONG_LONG timeRunTotal;
  unsigned PY_LONG_LONG timeRun;
  unsigned int swRunlistId;
  unsigned PY_LONG_LONG targetTimeSlice;
  unsigned PY_LONG_LONG cumulativePreemptionTime;
};

/* "cuda/bindings/cy_nvml.pxd":1022
 *     unsigned long long cumulativePreemptionTime
 * 
 * ctypedef struct _anon_pod4 '_anon_pod4':             # <<<<<<<<<<<<<<
 *     unsigned int avgFactor
 *     unsigned int frequency
*/
/* Cython-generated POD (avgFactor + frequency) for an anonymous vGPU
 * scheduler sub-struct; see cy_nvml.pxd:1022. */
struct _anon_pod4 {
  unsigned int avgFactor;
  unsigned int frequency;
};

/* "cuda/bindings/cy_nvml.pxd":1026
 *     unsigned int frequency
 * 
 * ctypedef struct _anon_pod5 '_anon_pod5':             # <<<<<<<<<<<<<<
 *     unsigned int timeslice
 * 
*/
/* Cython-generated POD (timeslice only) for an anonymous vGPU scheduler
 * sub-struct; see cy_nvml.pxd:1026. */
struct _anon_pod5 {
  unsigned int timeslice;
};

/* "cuda/bindings/cy_nvml.pxd":1029
 *     unsigned int timeslice
 * 
 * ctypedef struct nvmlVgpuSchedulerCapabilities_t 'nvmlVgpuSchedulerCapabilities_t':             # <<<<<<<<<<<<<<
 *     unsigned int supportedSchedulers[3]
 *     unsigned int maxTimeslice
*/
/* vGPU scheduler capabilities: up to 3 supported scheduler ids plus
 * timeslice / ARR parameter ranges. Mirrors cy_nvml.pxd:1029. */
struct nvmlVgpuSchedulerCapabilities_t {
  unsigned int supportedSchedulers[3];
  unsigned int maxTimeslice;
  unsigned int minTimeslice;
  unsigned int isArrModeSupported;
  unsigned int maxFrequencyForARR;
  unsigned int minFrequencyForARR;
  unsigned int maxAvgFactorForARR;
  unsigned int minAvgFactorForARR;
};

/* "cuda/bindings/cy_nvml.pxd":1039
 *     unsigned int minAvgFactorForARR
 * 
 * ctypedef struct nvmlVgpuLicenseExpiry_t 'nvmlVgpuLicenseExpiry_t':             # <<<<<<<<<<<<<<
 *     unsigned int year
 *     unsigned short month
*/
/* vGPU license expiry timestamp (broken-down date/time) and status byte;
 * mirrors cy_nvml.pxd:1039. Same layout as nvmlGridLicenseExpiry_t. */
struct nvmlVgpuLicenseExpiry_t {
  unsigned int year;
  unsigned short month;
  unsigned short day;
  unsigned short hour;
  unsigned short min;
  unsigned short sec;
  unsigned char status;
};

/* "cuda/bindings/cy_nvml.pxd":1048
 *     unsigned char status
 * 
 * ctypedef struct nvmlGridLicenseExpiry_t 'nvmlGridLicenseExpiry_t':             # <<<<<<<<<<<<<<
 *     unsigned int year
 *     unsigned short month
*/
/* GRID license expiry timestamp (broken-down date/time) and status byte;
 * mirrors cy_nvml.pxd:1048. Same layout as nvmlVgpuLicenseExpiry_t. */
struct nvmlGridLicenseExpiry_t {
  unsigned int year;
  unsigned short month;
  unsigned short day;
  unsigned short hour;
  unsigned short min;
  unsigned short sec;
  unsigned char status;
};

/* "cuda/bindings/cy_nvml.pxd":1057
 *     unsigned char status
 * 
 * ctypedef struct nvmlNvLinkPowerThres_t 'nvmlNvLinkPowerThres_t':             # <<<<<<<<<<<<<<
 *     unsigned int lowPwrThreshold
 * 
*/
/* NVLink low-power threshold; mirrors cy_nvml.pxd:1057. */
struct nvmlNvLinkPowerThres_t {
  unsigned int lowPwrThreshold;
};

/* "cuda/bindings/cy_nvml.pxd":1060
 *     unsigned int lowPwrThreshold
 * 
 * ctypedef struct nvmlHwbcEntry_t 'nvmlHwbcEntry_t':             # <<<<<<<<<<<<<<
 *     unsigned int hwbcId
 *     char firmwareVersion[32]
*/
/* HWBC (hardware bridge chip) entry: id plus firmware version string;
 * mirrors cy_nvml.pxd:1060. */
struct nvmlHwbcEntry_t {
  unsigned int hwbcId;
  char firmwareVersion[32];
};

/* "cuda/bindings/cy_nvml.pxd":1064
 *     char firmwareVersion[32]
 * 
 * ctypedef struct nvmlLedState_t 'nvmlLedState_t':             # <<<<<<<<<<<<<<
 *     char cause[256]
 *     nvmlLedColor_t color
*/
/* Unit LED state: textual cause plus LED color; mirrors cy_nvml.pxd:1064. */
struct nvmlLedState_t {
  char cause[256];
  nvmlLedColor_t color;
};

/* "cuda/bindings/cy_nvml.pxd":1068
 *     nvmlLedColor_t color
 * 
 * ctypedef struct nvmlUnitInfo_t 'nvmlUnitInfo_t':             # <<<<<<<<<<<<<<
 *     char name[96]
 *     char id[96]
*/
/* S-class unit identification strings; mirrors cy_nvml.pxd:1068. */
struct nvmlUnitInfo_t {
  char name[96];
  char id[96];
  char serial[96];
  char firmwareVersion[96];
};

/* "cuda/bindings/cy_nvml.pxd":1074
 *     char firmwareVersion[96]
 * 
 * ctypedef struct nvmlPSUInfo_t 'nvmlPSUInfo_t':             # <<<<<<<<<<<<<<
 *     char state[256]
 *     unsigned int current
*/
/* PSU readings: state string plus current/voltage/power values; mirrors
 * cy_nvml.pxd:1074. */
struct nvmlPSUInfo_t {
  char state[256];
  unsigned int current;
  unsigned int voltage;
  unsigned int power;
};

/* "cuda/bindings/cy_nvml.pxd":1080
 *     unsigned int power
 * 
 * ctypedef struct nvmlUnitFanInfo_t 'nvmlUnitFanInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int speed
 *     nvmlFanState_t state
*/
/* Unit fan speed and state; mirrors cy_nvml.pxd:1080. */
struct nvmlUnitFanInfo_t {
  unsigned int speed;
  nvmlFanState_t state;
};

/* "cuda/bindings/cy_nvml.pxd":1084
 *     nvmlFanState_t state
 * 
 * ctypedef struct nvmlSystemEventData_v1_t 'nvmlSystemEventData_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned long long eventType
 *     unsigned int gpuId
*/
/* System event record (v1): event-type bitmask plus originating GPU id;
 * mirrors cy_nvml.pxd:1084. */
struct nvmlSystemEventData_v1_t {
  unsigned PY_LONG_LONG eventType;
  unsigned int gpuId;
};

/* "cuda/bindings/cy_nvml.pxd":1088
 *     unsigned int gpuId
 * 
 * ctypedef struct nvmlAccountingStats_t 'nvmlAccountingStats_t':             # <<<<<<<<<<<<<<
 *     unsigned int gpuUtilization
 *     unsigned int memoryUtilization
*/
/* Per-process accounting stats: utilization, peak memory, run time and
 * start time, running flag, plus reserved padding. Mirrors
 * cy_nvml.pxd:1088. */
struct nvmlAccountingStats_t {
  unsigned int gpuUtilization;
  unsigned int memoryUtilization;
  unsigned PY_LONG_LONG maxMemoryUsage;
  unsigned PY_LONG_LONG time;
  unsigned PY_LONG_LONG startTime;
  unsigned int isRunning;
  unsigned int reserved[5];
};

/* "cuda/bindings/cy_nvml.pxd":1097
 *     unsigned int reserved[5]
 * 
 * ctypedef struct nvmlFBCStats_t 'nvmlFBCStats_t':             # <<<<<<<<<<<<<<
 *     unsigned int sessionsCount
 *     unsigned int averageFPS
*/
/* Frame-buffer-capture stats: session count, average FPS, average
 * latency. Mirrors cy_nvml.pxd:1097. */
struct nvmlFBCStats_t {
  unsigned int sessionsCount;
  unsigned int averageFPS;
  unsigned int averageLatency;
};

/* "cuda/bindings/cy_nvml.pxd":1102
 *     unsigned int averageLatency
 * 
 * ctypedef struct nvmlConfComputeSystemCaps_t 'nvmlConfComputeSystemCaps_t':             # <<<<<<<<<<<<<<
 *     unsigned int cpuCaps
 *     unsigned int gpusCaps
*/
/* Confidential-computing capability bitmasks for CPU and GPUs; mirrors
 * cy_nvml.pxd:1102. */
struct nvmlConfComputeSystemCaps_t {
  unsigned int cpuCaps;
  unsigned int gpusCaps;
};

/* "cuda/bindings/cy_nvml.pxd":1106
 *     unsigned int gpusCaps
 * 
 * ctypedef struct nvmlConfComputeSystemState_t 'nvmlConfComputeSystemState_t':             # <<<<<<<<<<<<<<
 *     unsigned int environment
 *     unsigned int ccFeature
*/
/* Confidential-computing system state; the v1 settings struct
 * (nvmlSystemConfComputeSettings_v1_t) adds version and multiGpuMode.
 * Mirrors cy_nvml.pxd:1106. */
struct nvmlConfComputeSystemState_t {
  unsigned int environment;
  unsigned int ccFeature;
  unsigned int devToolsMode;
};

/* "cuda/bindings/cy_nvml.pxd":1111
 *     unsigned int devToolsMode
 * 
 * ctypedef struct nvmlConfComputeMemSizeInfo_t 'nvmlConfComputeMemSizeInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned long long protectedMemSizeKib
 *     unsigned long long unprotectedMemSizeKib
*/
/* Protected/unprotected memory sizes in KiB for confidential computing;
 * mirrors cy_nvml.pxd:1111. */
struct nvmlConfComputeMemSizeInfo_t {
  unsigned PY_LONG_LONG protectedMemSizeKib;
  unsigned PY_LONG_LONG unprotectedMemSizeKib;
};

/* "cuda/bindings/cy_nvml.pxd":1115
 *     unsigned long long unprotectedMemSizeKib
 * 
 * ctypedef struct nvmlConfComputeGpuCertificate_t 'nvmlConfComputeGpuCertificate_t':             # <<<<<<<<<<<<<<
 *     unsigned int certChainSize
 *     unsigned int attestationCertChainSize
*/
/* GPU certificate chains for confidential computing; the *Size fields
 * give the valid byte counts within the fixed buffers (4096 = 0x1000,
 * 5120 = 0x1400 per the .pxd). Mirrors cy_nvml.pxd:1115. */
struct nvmlConfComputeGpuCertificate_t {
  unsigned int certChainSize;
  unsigned int attestationCertChainSize;
  unsigned char certChain[4096];
  unsigned char attestationCertChain[5120];
};

/* "cuda/bindings/cy_nvml.pxd":1121
 *     unsigned char attestationCertChain[0x1400]
 * 
 * ctypedef struct nvmlConfComputeGpuAttestationReport_t 'nvmlConfComputeGpuAttestationReport_t':             # <<<<<<<<<<<<<<
 *     unsigned int isCecAttestationReportPresent
 *     unsigned int attestationReportSize
*/
/* Mirror of nvmlConfComputeGpuAttestationReport_t: GPU (and optional CEC)
 * attestation report buffers plus their sizes. Layout must match nvml.h. */
struct nvmlConfComputeGpuAttestationReport_t {
  unsigned int isCecAttestationReportPresent;
  unsigned int attestationReportSize;
  unsigned int cecAttestationReportSize;
  unsigned char nonce[32];
  unsigned char attestationReport[8192];    /* 0x2000 */
  unsigned char cecAttestationReport[4096]; /* 0x1000 */
};

/* "cuda/bindings/cy_nvml.pxd":1129
 *     unsigned char cecAttestationReport[0x1000]
 * 
 * ctypedef struct nvmlVgpuVersion_t 'nvmlVgpuVersion_t':             # <<<<<<<<<<<<<<
 *     unsigned int minVersion
 *     unsigned int maxVersion
*/
/* Mirror of nvmlVgpuVersion_t: an inclusive vGPU version range. */
struct nvmlVgpuVersion_t {
  unsigned int minVersion;
  unsigned int maxVersion;
};

/* "cuda/bindings/cy_nvml.pxd":1133
 *     unsigned int maxVersion
 * 
 * ctypedef struct nvmlVgpuMetadata_t 'nvmlVgpuMetadata_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int revision
*/
/* Mirror of nvmlVgpuMetadata_t. NOTE(review): opaqueDataSize presumably
 * describes a variable-length blob whose first bytes live in the trailing
 * opaqueData[4] -- confirm against nvml.h before relying on it. */
struct nvmlVgpuMetadata_t {
  unsigned int version;
  unsigned int revision;
  nvmlVgpuGuestInfoState_t guestInfoState;
  char guestDriverVersion[80];
  char hostDriverVersion[80];
  unsigned int reserved[6];
  unsigned int vgpuVirtualizationCaps;
  unsigned int guestVgpuVersion;
  unsigned int opaqueDataSize;
  char opaqueData[4];
};

/* "cuda/bindings/cy_nvml.pxd":1145
 *     char opaqueData[4]
 * 
 * ctypedef struct nvmlVgpuPgpuCompatibility_t 'nvmlVgpuPgpuCompatibility_t':             # <<<<<<<<<<<<<<
 *     nvmlVgpuVmCompatibility_t vgpuVmCompatibility
 *     nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode
*/
/* Mirror of nvmlVgpuPgpuCompatibility_t: vGPU/physical-GPU compatibility
 * result plus the code limiting it. */
struct nvmlVgpuPgpuCompatibility_t {
  nvmlVgpuVmCompatibility_t vgpuVmCompatibility;
  nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode;
};

/* "cuda/bindings/cy_nvml.pxd":1149
 *     nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode
 * 
 * ctypedef struct nvmlGpuInstancePlacement_t 'nvmlGpuInstancePlacement_t':             # <<<<<<<<<<<<<<
 *     unsigned int start
 *     unsigned int size
*/
/* Mirror of nvmlGpuInstancePlacement_t: a placement described by a start
 * offset and a size. */
struct nvmlGpuInstancePlacement_t {
  unsigned int start;
  unsigned int size;
};

/* "cuda/bindings/cy_nvml.pxd":1153
 *     unsigned int size
 * 
 * ctypedef struct nvmlGpuInstanceProfileInfo_t 'nvmlGpuInstanceProfileInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int id
 *     unsigned int isP2pSupported
*/
/* Mirror of nvmlGpuInstanceProfileInfo_t: engine/resource counts for a
 * MIG GPU-instance profile. Layout must match nvml.h. */
struct nvmlGpuInstanceProfileInfo_t {
  unsigned int id;
  unsigned int isP2pSupported;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int copyEngineCount;
  unsigned int decoderCount;
  unsigned int encoderCount;
  unsigned int jpegCount;
  unsigned int ofaCount;
  unsigned PY_LONG_LONG memorySizeMB; /* PY_LONG_LONG = Cython's long long */
};

/* "cuda/bindings/cy_nvml.pxd":1166
 *     unsigned long long memorySizeMB
 * 
 * ctypedef struct nvmlGpuInstanceProfileInfo_v2_t 'nvmlGpuInstanceProfileInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Mirror of nvmlGpuInstanceProfileInfo_v2_t: same as the v1 struct above
 * plus a leading `version` field and a trailing `name` string. */
struct nvmlGpuInstanceProfileInfo_v2_t {
  unsigned int version;
  unsigned int id;
  unsigned int isP2pSupported;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int copyEngineCount;
  unsigned int decoderCount;
  unsigned int encoderCount;
  unsigned int jpegCount;
  unsigned int ofaCount;
  unsigned PY_LONG_LONG memorySizeMB;
  char name[96];
};

/* "cuda/bindings/cy_nvml.pxd":1181
 *     char name[96]
 * 
 * ctypedef struct nvmlGpuInstanceProfileInfo_v3_t 'nvmlGpuInstanceProfileInfo_v3_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Mirror of nvmlGpuInstanceProfileInfo_v3_t: relative to v2 this drops
 * `isP2pSupported` and appends a `capabilities` bit field. */
struct nvmlGpuInstanceProfileInfo_v3_t {
  unsigned int version;
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int copyEngineCount;
  unsigned int decoderCount;
  unsigned int encoderCount;
  unsigned int jpegCount;
  unsigned int ofaCount;
  unsigned PY_LONG_LONG memorySizeMB;
  char name[96];
  unsigned int capabilities;
};

/* "cuda/bindings/cy_nvml.pxd":1196
 *     unsigned int capabilities
 * 
 * ctypedef struct nvmlComputeInstancePlacement_t 'nvmlComputeInstancePlacement_t':             # <<<<<<<<<<<<<<
 *     unsigned int start
 *     unsigned int size
*/
/* Mirror of nvmlComputeInstancePlacement_t: start offset + size, analogous
 * to nvmlGpuInstancePlacement_t. */
struct nvmlComputeInstancePlacement_t {
  unsigned int start;
  unsigned int size;
};

/* "cuda/bindings/cy_nvml.pxd":1200
 *     unsigned int size
 * 
 * ctypedef struct nvmlComputeInstanceProfileInfo_t 'nvmlComputeInstanceProfileInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int id
 *     unsigned int sliceCount
*/
/* Mirror of nvmlComputeInstanceProfileInfo_t: shared engine counts for a
 * MIG compute-instance profile. Layout must match nvml.h. */
struct nvmlComputeInstanceProfileInfo_t {
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
};

/* "cuda/bindings/cy_nvml.pxd":1211
 *     unsigned int sharedOfaCount
 * 
 * ctypedef struct nvmlComputeInstanceProfileInfo_v2_t 'nvmlComputeInstanceProfileInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Mirror of nvmlComputeInstanceProfileInfo_v2_t: the v1 layout plus a
 * leading `version` and a trailing `name` string. */
struct nvmlComputeInstanceProfileInfo_v2_t {
  unsigned int version;
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
  char name[96];
};

/* "cuda/bindings/cy_nvml.pxd":1224
 *     char name[96]
 * 
 * ctypedef struct nvmlComputeInstanceProfileInfo_v3_t 'nvmlComputeInstanceProfileInfo_v3_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int id
*/
/* Mirror of nvmlComputeInstanceProfileInfo_v3_t: the v2 layout plus a
 * trailing `capabilities` bit field. */
struct nvmlComputeInstanceProfileInfo_v3_t {
  unsigned int version;
  unsigned int id;
  unsigned int sliceCount;
  unsigned int instanceCount;
  unsigned int multiprocessorCount;
  unsigned int sharedCopyEngineCount;
  unsigned int sharedDecoderCount;
  unsigned int sharedEncoderCount;
  unsigned int sharedJpegCount;
  unsigned int sharedOfaCount;
  char name[96];
  unsigned int capabilities;
};

/* "cuda/bindings/cy_nvml.pxd":1238
 *     unsigned int capabilities
 * 
 * ctypedef struct _anon_pod6 '_anon_pod6':             # <<<<<<<<<<<<<<
 *     char* shortName
 *     char* longName
*/
/* Cython-generated name for an anonymous struct in nvml.h: descriptive
 * C strings for a metric (short name, long name, unit). NOTE(review):
 * string ownership is not visible here -- presumably NVML-owned statics;
 * confirm against nvml.h. */
struct _anon_pod6 {
  char *shortName;
  char *longName;
  char *unit;
};

/* "cuda/bindings/cy_nvml.pxd":1243
 *     char* unit
 * 
 * ctypedef struct nvmlGpmSupport_t 'nvmlGpmSupport_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int isSupportedDevice
*/
/* Mirror of nvmlGpmSupport_t: GPM support query (version in, flag out). */
struct nvmlGpmSupport_t {
  unsigned int version;
  unsigned int isSupportedDevice;
};

/* "cuda/bindings/cy_nvml.pxd":1247
 *     unsigned int isSupportedDevice
 * 
 * ctypedef struct nvmlMask255_t 'nvmlMask255_t':             # <<<<<<<<<<<<<<
 *     unsigned int mask[8]
 * 
*/
/* Mirror of nvmlMask255_t: 8 x 32-bit words forming a 256-bit bitmask
 * (bit indices 0..255). */
struct nvmlMask255_t {
  unsigned int mask[8];
};

/* "cuda/bindings/cy_nvml.pxd":1250
 *     unsigned int mask[8]
 * 
 * ctypedef struct nvmlDevicePowerMizerModes_v1_t 'nvmlDevicePowerMizerModes_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int currentMode
 *     unsigned int mode
*/
/* Mirror of nvmlDevicePowerMizerModes_v1_t: current/requested PowerMizer
 * mode plus a mask of supported modes. */
struct nvmlDevicePowerMizerModes_v1_t {
  unsigned int currentMode;
  unsigned int mode;
  unsigned int supportedPowerMizerModes;
};

/* "cuda/bindings/cy_nvml.pxd":1255
 *     unsigned int supportedPowerMizerModes
 * 
 * ctypedef struct nvmlHostname_v1_t 'nvmlHostname_v1_t':             # <<<<<<<<<<<<<<
 *     char value[64]
 * 
*/
/* Mirror of nvmlHostname_v1_t: fixed 64-byte hostname buffer. */
struct nvmlHostname_v1_t {
  char value[64];
};

/* "cuda/bindings/cy_nvml.pxd":1258
 *     char value[64]
 * 
 * ctypedef struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t 'nvmlEccSramUniqueUncorrectedErrorEntry_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int unit
 *     unsigned int location
*/
/* Mirror of nvmlEccSramUniqueUncorrectedErrorEntry_v1_t: one unique
 * uncorrected SRAM ECC error record (location + occurrence count). */
struct nvmlEccSramUniqueUncorrectedErrorEntry_v1_t {
  unsigned int unit;
  unsigned int location;
  unsigned int sublocation;
  unsigned int extlocation;
  unsigned int address;
  unsigned int isParity;
  unsigned int count;
};

/* "cuda/bindings/cy_nvml.pxd":1267
 *     unsigned int count
 * 
 * ctypedef struct nvmlNvLinkInfo_v1_t 'nvmlNvLinkInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int isNvleEnabled
*/
/* Mirror of nvmlNvLinkInfo_v1_t: versioned NVLink info (NVLE enable flag). */
struct nvmlNvLinkInfo_v1_t {
  unsigned int version;
  unsigned int isNvleEnabled;
};

/* "cuda/bindings/cy_nvml.pxd":1271
 *     unsigned int isNvleEnabled
 * 
 * ctypedef struct nvmlNvlinkFirmwareVersion_t 'nvmlNvlinkFirmwareVersion_t':             # <<<<<<<<<<<<<<
 *     unsigned char ucodeType
 *     unsigned int major
*/
/* Mirror of nvmlNvlinkFirmwareVersion_t: ucode type plus a
 * major.minor.subMinor firmware version triple. */
struct nvmlNvlinkFirmwareVersion_t {
  unsigned char ucodeType;
  unsigned int major;
  unsigned int minor;
  unsigned int subMinor;
};

/* "cuda/bindings/cy_nvml.pxd":1277
 *     unsigned int subMinor
 * 
 * ctypedef union _anon_pod7 '_anon_pod7':             # <<<<<<<<<<<<<<
 *     unsigned char inData[496]
 *     unsigned char outData[496]
*/
/* Cython-generated name for an anonymous union in nvml.h: inData and
 * outData are overlapping views of the same 496-byte buffer. */
union _anon_pod7 {
  unsigned char inData[496];
  unsigned char outData[496];
};

/* "cuda/bindings/cy_nvml.pxd":1281
 *     unsigned char outData[496]
 * 
 * ctypedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t 'nvmlPciInfoExt_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
*/
typedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1282
 * 
 * ctypedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t 'nvmlPciInfoExt_t'
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
*/
typedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1283
 * ctypedef nvmlPciInfoExt_v1_t nvmlPciInfoExt_t 'nvmlPciInfoExt_t'
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
*/
typedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1284
 * ctypedef nvmlCoolerInfo_v1_t nvmlCoolerInfo_t 'nvmlCoolerInfo_t'
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
*/
typedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1285
 * ctypedef nvmlDramEncryptionInfo_v1_t nvmlDramEncryptionInfo_t 'nvmlDramEncryptionInfo_t'
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
*/
typedef nvmlClockOffset_v1_t nvmlClockOffset_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1286
 * ctypedef nvmlMarginTemperature_v1_t nvmlMarginTemperature_t 'nvmlMarginTemperature_t'
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
*/
typedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1287
 * ctypedef nvmlClockOffset_v1_t nvmlClockOffset_t 'nvmlClockOffset_t'
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
*/
typedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1288
 * ctypedef nvmlFanSpeedInfo_v1_t nvmlFanSpeedInfo_t 'nvmlFanSpeedInfo_t'
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'
*/
typedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1289
 * ctypedef nvmlDevicePerfModes_v1_t nvmlDevicePerfModes_t 'nvmlDevicePerfModes_t'
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'
 * ctypedef struct nvmlPowerValue_v2_t 'nvmlPowerValue_v2_t':
*/
typedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1290
 * ctypedef nvmlDeviceCurrentClockFreqs_v1_t nvmlDeviceCurrentClockFreqs_t 'nvmlDeviceCurrentClockFreqs_t'
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlPowerValue_v2_t 'nvmlPowerValue_v2_t':
 *     unsigned int version
*/
typedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t;  /* unversioned alias of the v2 layout */

/* "cuda/bindings/cy_nvml.pxd":1291
 * ctypedef nvmlEccSramErrorStatus_v1_t nvmlEccSramErrorStatus_t 'nvmlEccSramErrorStatus_t'
 * ctypedef nvmlPlatformInfo_v2_t nvmlPlatformInfo_t 'nvmlPlatformInfo_t'
 * ctypedef struct nvmlPowerValue_v2_t 'nvmlPowerValue_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlPowerScopeType_t powerScope
*/
/* Mirror of nvmlPowerValue_v2_t: a scoped power value; the Mw suffix
 * indicates milliwatts. */
struct nvmlPowerValue_v2_t {
  unsigned int version;
  nvmlPowerScopeType_t powerScope;
  unsigned int powerValueMw;
};

/* "cuda/bindings/cy_nvml.pxd":1296
 *     unsigned int powerValueMw
 * 
 * ctypedef struct nvmlVgpuTypeIdInfo_v1_t 'nvmlVgpuTypeIdInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int vgpuCount
*/
/* Mirror of nvmlVgpuTypeIdInfo_v1_t: a counted array of vGPU type IDs.
 * NOTE(review): array storage is presumably caller-allocated -- confirm
 * against nvml.h. */
struct nvmlVgpuTypeIdInfo_v1_t {
  unsigned int version;
  unsigned int vgpuCount;        /* number of entries in vgpuTypeIds */
  nvmlVgpuTypeId_t *vgpuTypeIds;
};

/* "cuda/bindings/cy_nvml.pxd":1301
 *     nvmlVgpuTypeId_t* vgpuTypeIds
 * 
 * ctypedef struct nvmlVgpuTypeMaxInstance_v1_t 'nvmlVgpuTypeMaxInstance_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlVgpuTypeId_t vgpuTypeId
*/
/* Mirror of nvmlVgpuTypeMaxInstance_v1_t: max instances per GPU instance
 * for a given vGPU type. */
struct nvmlVgpuTypeMaxInstance_v1_t {
  unsigned int version;
  nvmlVgpuTypeId_t vgpuTypeId;
  unsigned int maxInstancePerGI;
};

/* "cuda/bindings/cy_nvml.pxd":1306
 *     unsigned int maxInstancePerGI
 * 
 * ctypedef struct nvmlVgpuCreatablePlacementInfo_v1_t 'nvmlVgpuCreatablePlacementInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlVgpuTypeId_t vgpuTypeId
*/
/* Mirror of nvmlVgpuCreatablePlacementInfo_v1_t: a counted list of
 * creatable placement IDs for a vGPU type. NOTE(review): placementIds
 * storage is presumably caller-allocated -- confirm against nvml.h. */
struct nvmlVgpuCreatablePlacementInfo_v1_t {
  unsigned int version;
  nvmlVgpuTypeId_t vgpuTypeId;
  unsigned int count;            /* number of entries in placementIds */
  unsigned int *placementIds;
  unsigned int placementSize;
};

/* "cuda/bindings/cy_nvml.pxd":1313
 *     unsigned int placementSize
 * 
 * ctypedef struct nvmlVgpuProcessUtilizationSample_t 'nvmlVgpuProcessUtilizationSample_t':             # <<<<<<<<<<<<<<
 *     nvmlVgpuInstance_t vgpuInstance
 *     unsigned int pid
*/
/* Mirror of nvmlVgpuProcessUtilizationSample_t: per-process utilization
 * sample within a vGPU instance (SM/mem/enc/dec). */
struct nvmlVgpuProcessUtilizationSample_t {
  nvmlVgpuInstance_t vgpuInstance;
  unsigned int pid;
  char processName[64];
  unsigned PY_LONG_LONG timeStamp; /* PY_LONG_LONG = Cython's long long */
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
};

/* "cuda/bindings/cy_nvml.pxd":1323
 *     unsigned int decUtil
 * 
 * ctypedef struct nvmlVgpuProcessUtilizationInfo_v1_t 'nvmlVgpuProcessUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     char processName[64]
 *     unsigned long long timeStamp
*/
/* Mirror of nvmlVgpuProcessUtilizationInfo_v1_t: like the sample struct
 * above but with a different field order and two extra engine counters
 * (jpgUtil, ofaUtil). */
struct nvmlVgpuProcessUtilizationInfo_v1_t {
  char processName[64];
  unsigned PY_LONG_LONG timeStamp;
  nvmlVgpuInstance_t vgpuInstance;
  unsigned int pid;
  unsigned int smUtil;
  unsigned int memUtil;
  unsigned int encUtil;
  unsigned int decUtil;
  unsigned int jpgUtil;
  unsigned int ofaUtil;
};

/* "cuda/bindings/cy_nvml.pxd":1335
 *     unsigned int ofaUtil
 * 
 * ctypedef struct nvmlActiveVgpuInstanceInfo_v1_t 'nvmlActiveVgpuInstanceInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int vgpuCount
*/
/* Mirror of nvmlActiveVgpuInstanceInfo_v1_t: a counted array of active
 * vGPU instance handles. NOTE(review): array storage is presumably
 * caller-allocated -- confirm against nvml.h. */
struct nvmlActiveVgpuInstanceInfo_v1_t {
  unsigned int version;
  unsigned int vgpuCount;            /* number of entries in vgpuInstances */
  nvmlVgpuInstance_t *vgpuInstances;
};

/* "cuda/bindings/cy_nvml.pxd":1340
 *     nvmlVgpuInstance_t* vgpuInstances
 * 
 * ctypedef struct nvmlEncoderSessionInfo_t 'nvmlEncoderSessionInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int sessionId
 *     unsigned int pid
*/
/* Mirror of nvmlEncoderSessionInfo_t: per-session encoder info
 * (owner, codec, resolution, throughput). */
struct nvmlEncoderSessionInfo_t {
  unsigned int sessionId;
  unsigned int pid;
  nvmlVgpuInstance_t vgpuInstance;
  nvmlEncoderType_t codecType;
  unsigned int hResolution;
  unsigned int vResolution;
  unsigned int averageFps;
  unsigned int averageLatency;
};

/* "cuda/bindings/cy_nvml.pxd":1350
 *     unsigned int averageLatency
 * 
 * ctypedef struct nvmlFBCSessionInfo_t 'nvmlFBCSessionInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int sessionId
 *     unsigned int pid
*/
/* Mirror of nvmlFBCSessionInfo_t: per-session FBC info (owner, display,
 * resolutions, throughput). Layout must match nvml.h. */
struct nvmlFBCSessionInfo_t {
  unsigned int sessionId;
  unsigned int pid;
  nvmlVgpuInstance_t vgpuInstance;
  unsigned int displayOrdinal;
  nvmlFBCSessionType_t sessionType;
  unsigned int sessionFlags;
  unsigned int hMaxResolution;
  unsigned int vMaxResolution;
  unsigned int hResolution;
  unsigned int vResolution;
  unsigned int averageFPS;
  unsigned int averageLatency;
};

/* "cuda/bindings/cy_nvml.pxd":1364
 *     unsigned int averageLatency
 * 
 * ctypedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t 'nvmlVgpuHeterogeneousMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
*/
typedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1365
 * 
 * ctypedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t 'nvmlVgpuHeterogeneousMode_t'
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
*/
typedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1366
 * ctypedef nvmlVgpuHeterogeneousMode_v1_t nvmlVgpuHeterogeneousMode_t 'nvmlVgpuHeterogeneousMode_t'
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
*/
typedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t;  /* unversioned alias of the v2 layout */

/* "cuda/bindings/cy_nvml.pxd":1367
 * ctypedef nvmlVgpuPlacementId_v1_t nvmlVgpuPlacementId_t 'nvmlVgpuPlacementId_t'
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
*/
typedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1368
 * ctypedef nvmlVgpuPlacementList_v2_t nvmlVgpuPlacementList_t 'nvmlVgpuPlacementList_t'
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
*/
typedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1369
 * ctypedef nvmlVgpuTypeBar1Info_v1_t nvmlVgpuTypeBar1Info_t 'nvmlVgpuTypeBar1Info_t'
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'
*/
typedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1370
 * ctypedef nvmlVgpuRuntimeState_v1_t nvmlVgpuRuntimeState_t 'nvmlVgpuRuntimeState_t'
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'
 * ctypedef struct nvmlGpuFabricInfo_t 'nvmlGpuFabricInfo_t':
*/
typedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1371
 * ctypedef nvmlSystemConfComputeSettings_v1_t nvmlSystemConfComputeSettings_t 'nvmlSystemConfComputeSettings_t'
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlGpuFabricInfo_t 'nvmlGpuFabricInfo_t':
 *     unsigned char clusterUuid[16]
*/
typedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1372
 * ctypedef nvmlConfComputeSetKeyRotationThresholdInfo_v1_t nvmlConfComputeSetKeyRotationThresholdInfo_t 'nvmlConfComputeSetKeyRotationThresholdInfo_t'
 * ctypedef nvmlConfComputeGetKeyRotationThresholdInfo_v1_t nvmlConfComputeGetKeyRotationThresholdInfo_t 'nvmlConfComputeGetKeyRotationThresholdInfo_t'
 * ctypedef struct nvmlGpuFabricInfo_t 'nvmlGpuFabricInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned char clusterUuid[16]
 *     nvmlReturn_t status
*/
/* Mirror of nvmlGpuFabricInfo_t: GPU fabric (NVSwitch cluster) membership
 * info -- cluster UUID, probe status, clique, and state. */
struct nvmlGpuFabricInfo_t {
  unsigned char clusterUuid[16];
  nvmlReturn_t status;
  unsigned int cliqueId;
  nvmlGpuFabricState_t state;
};

/* "cuda/bindings/cy_nvml.pxd":1378
 *     nvmlGpuFabricState_t state
 * 
 * ctypedef struct nvmlGpuFabricInfo_v2_t 'nvmlGpuFabricInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char clusterUuid[16]
*/
/* Mirror of nvmlGpuFabricInfo_v2_t: the v1 layout plus a leading `version`
 * and a trailing `healthMask`. */
struct nvmlGpuFabricInfo_v2_t {
  unsigned int version;
  unsigned char clusterUuid[16];
  nvmlReturn_t status;
  unsigned int cliqueId;
  nvmlGpuFabricState_t state;
  unsigned int healthMask;
};

/* "cuda/bindings/cy_nvml.pxd":1386
 *     unsigned int healthMask
 * 
 * ctypedef struct nvmlGpuFabricInfo_v3_t 'nvmlGpuFabricInfo_v3_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned char clusterUuid[16]
*/
/* Mirror of nvmlGpuFabricInfo_v3_t: the v2 layout plus a trailing
 * `healthSummary` byte. */
struct nvmlGpuFabricInfo_v3_t {
  unsigned int version;
  unsigned char clusterUuid[16];
  nvmlReturn_t status;
  unsigned int cliqueId;
  nvmlGpuFabricState_t state;
  unsigned int healthMask;
  unsigned char healthSummary;
};

/* "cuda/bindings/cy_nvml.pxd":1395
 *     unsigned char healthSummary
 * 
 * ctypedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t 'nvmlSystemDriverBranchInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
*/
typedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1396
 * 
 * ctypedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t 'nvmlSystemDriverBranchInfo_t'
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
*/
typedef nvmlTemperature_v1_t nvmlTemperature_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1397
 * ctypedef nvmlSystemDriverBranchInfo_v1_t nvmlSystemDriverBranchInfo_t 'nvmlSystemDriverBranchInfo_t'
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
*/
typedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1398
 * ctypedef nvmlTemperature_v1_t nvmlTemperature_t 'nvmlTemperature_t'
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
*/
typedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1399
 * ctypedef nvmlNvlinkSupportedBwModes_v1_t nvmlNvlinkSupportedBwModes_t 'nvmlNvlinkSupportedBwModes_t'
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
*/
typedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1400
 * ctypedef nvmlNvlinkGetBwMode_v1_t nvmlNvlinkGetBwMode_t 'nvmlNvlinkGetBwMode_t'
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
*/
typedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1401
 * ctypedef nvmlNvlinkSetBwMode_v1_t nvmlNvlinkSetBwMode_t 'nvmlNvlinkSetBwMode_t'
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
*/
typedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1402
 * ctypedef nvmlDeviceCapabilities_v1_t nvmlDeviceCapabilities_t 'nvmlDeviceCapabilities_t'
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
*/
typedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1403
 * ctypedef nvmlPowerSmoothingProfile_v1_t nvmlPowerSmoothingProfile_t 'nvmlPowerSmoothingProfile_t'
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'
*/
typedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1404
 * ctypedef nvmlPowerSmoothingState_v1_t nvmlPowerSmoothingState_t 'nvmlPowerSmoothingState_t'
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'
 * ctypedef struct nvmlEventData_t 'nvmlEventData_t':
*/
typedef nvmlRepairStatus_v1_t nvmlRepairStatus_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1405
 * ctypedef nvmlDeviceAddressingMode_v1_t nvmlDeviceAddressingMode_t 'nvmlDeviceAddressingMode_t'
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlEventData_t 'nvmlEventData_t':
 *     nvmlDevice_t device
*/
typedef nvmlPdi_v1_t nvmlPdi_t;  /* unversioned alias of the v1 layout */

/* "cuda/bindings/cy_nvml.pxd":1406
 * ctypedef nvmlRepairStatus_v1_t nvmlRepairStatus_t 'nvmlRepairStatus_t'
 * ctypedef nvmlPdi_v1_t nvmlPdi_t 'nvmlPdi_t'
 * ctypedef struct nvmlEventData_t 'nvmlEventData_t':             # <<<<<<<<<<<<<<
 *     nvmlDevice_t device
 *     unsigned long long eventType
*/
/* Mirror of nvmlEventData_t: one delivered device event (type bitmask,
 * event-specific payload, and MIG instance IDs). */
struct nvmlEventData_t {
  nvmlDevice_t device;
  unsigned PY_LONG_LONG eventType; /* PY_LONG_LONG = Cython's long long */
  unsigned PY_LONG_LONG eventData;
  unsigned int gpuInstanceId;
  unsigned int computeInstanceId;
};

/* "cuda/bindings/cy_nvml.pxd":1413
 *     unsigned int computeInstanceId
 * 
 * ctypedef struct nvmlSystemEventSetCreateRequest_v1_t 'nvmlSystemEventSetCreateRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlSystemEventSet_t set
*/
/* Mirror of nvmlSystemEventSetCreateRequest_v1_t: versioned request
 * carrying the system event-set handle. */
struct nvmlSystemEventSetCreateRequest_v1_t {
  unsigned int version;
  nvmlSystemEventSet_t set;
};

/* "cuda/bindings/cy_nvml.pxd":1417
 *     nvmlSystemEventSet_t set
 * 
 * ctypedef struct nvmlSystemEventSetFreeRequest_v1_t 'nvmlSystemEventSetFreeRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlSystemEventSet_t set
*/
/* Mirror of nvmlSystemEventSetFreeRequest_v1_t: versioned request naming
 * the event set to free. Same layout as the create request. */
struct nvmlSystemEventSetFreeRequest_v1_t {
  unsigned int version;
  nvmlSystemEventSet_t set;
};

/* "cuda/bindings/cy_nvml.pxd":1421
 *     nvmlSystemEventSet_t set
 * 
 * ctypedef struct nvmlSystemRegisterEventRequest_v1_t 'nvmlSystemRegisterEventRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned long long eventTypes
*/
/* Mirror of NVML's 'nvmlSystemRegisterEventRequest_v1_t'; layout matches the NVML header ABI. */
struct nvmlSystemRegisterEventRequest_v1_t {
  unsigned int version;
  unsigned PY_LONG_LONG eventTypes;  /* bitmask per pxd 'unsigned long long eventTypes' */
  nvmlSystemEventSet_t set;
};

/* "cuda/bindings/cy_nvml.pxd":1426
 *     nvmlSystemEventSet_t set
 * 
 * ctypedef struct nvmlExcludedDeviceInfo_t 'nvmlExcludedDeviceInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlPciInfo_t pciInfo
 *     char uuid[80]
*/
/* Mirror of NVML's 'nvmlExcludedDeviceInfo_t'; layout matches the NVML header ABI. */
struct nvmlExcludedDeviceInfo_t {
  nvmlPciInfo_t pciInfo;
  char uuid[80];  /* fixed-size UUID string buffer per the pxd declaration */
};

/* "cuda/bindings/cy_nvml.pxd":1430
 *     char uuid[80]
 * 
 * ctypedef struct nvmlProcessDetailList_v1_t 'nvmlProcessDetailList_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int mode
*/
/* Mirror of NVML's 'nvmlProcessDetailList_v1_t'; layout matches the NVML header ABI. */
struct nvmlProcessDetailList_v1_t {
  unsigned int version;
  unsigned int mode;
  unsigned int numProcArrayEntries;  /* count of entries in procArray (caller-managed buffer) */
  nvmlProcessDetail_v1_t *procArray;
};

/* "cuda/bindings/cy_nvml.pxd":1436
 *     nvmlProcessDetail_v1_t* procArray
 * 
 * ctypedef struct nvmlBridgeChipHierarchy_t 'nvmlBridgeChipHierarchy_t':             # <<<<<<<<<<<<<<
 *     unsigned char bridgeCount
 *     nvmlBridgeChipInfo_t bridgeChipInfo[128]
*/
/* Mirror of NVML's 'nvmlBridgeChipHierarchy_t'; layout matches the NVML header ABI. */
struct nvmlBridgeChipHierarchy_t {
  unsigned char bridgeCount;
  nvmlBridgeChipInfo_t bridgeChipInfo[128];
};

/* "cuda/bindings/cy_nvml.pxd":1440
 *     nvmlBridgeChipInfo_t bridgeChipInfo[128]
 * 
 * ctypedef struct nvmlSample_t 'nvmlSample_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timeStamp
 *     nvmlValue_t sampleValue
*/
/* Mirror of NVML's 'nvmlSample_t'; layout matches the NVML header ABI. */
struct nvmlSample_t {
  unsigned PY_LONG_LONG timeStamp;  /* PY_LONG_LONG is CPython's 'long long' alias */
  nvmlValue_t sampleValue;
};

/* "cuda/bindings/cy_nvml.pxd":1444
 *     nvmlValue_t sampleValue
 * 
 * ctypedef struct nvmlVgpuInstanceUtilizationSample_t 'nvmlVgpuInstanceUtilizationSample_t':             # <<<<<<<<<<<<<<
 *     nvmlVgpuInstance_t vgpuInstance
 *     unsigned long long timeStamp
*/
/* Mirror of NVML's 'nvmlVgpuInstanceUtilizationSample_t'; layout matches the NVML header ABI. */
struct nvmlVgpuInstanceUtilizationSample_t {
  nvmlVgpuInstance_t vgpuInstance;
  unsigned PY_LONG_LONG timeStamp;
  nvmlValue_t smUtil;
  nvmlValue_t memUtil;
  nvmlValue_t encUtil;
  nvmlValue_t decUtil;
};

/* "cuda/bindings/cy_nvml.pxd":1452
 *     nvmlValue_t decUtil
 * 
 * ctypedef struct nvmlVgpuInstanceUtilizationInfo_v1_t 'nvmlVgpuInstanceUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned long long timeStamp
 *     nvmlVgpuInstance_t vgpuInstance
*/
/* Mirror of NVML's 'nvmlVgpuInstanceUtilizationInfo_v1_t'; extends the sample struct
   with jpg/ofa engine utilization. Layout matches the NVML header ABI. */
struct nvmlVgpuInstanceUtilizationInfo_v1_t {
  unsigned PY_LONG_LONG timeStamp;
  nvmlVgpuInstance_t vgpuInstance;
  nvmlValue_t smUtil;
  nvmlValue_t memUtil;
  nvmlValue_t encUtil;
  nvmlValue_t decUtil;
  nvmlValue_t jpgUtil;
  nvmlValue_t ofaUtil;
};

/* "cuda/bindings/cy_nvml.pxd":1462
 *     nvmlValue_t ofaUtil
 * 
 * ctypedef struct nvmlFieldValue_t 'nvmlFieldValue_t':             # <<<<<<<<<<<<<<
 *     unsigned int fieldId
 *     unsigned int scopeId
*/
/* Mirror of NVML's 'nvmlFieldValue_t'; layout matches the NVML header ABI. */
struct nvmlFieldValue_t {
  unsigned int fieldId;
  unsigned int scopeId;
  PY_LONG_LONG timestamp;   /* signed 'long long' per the pxd declaration */
  PY_LONG_LONG latencyUsec;
  nvmlValueType_t valueType;
  nvmlReturn_t nvmlReturn;  /* per-field status code returned alongside the value */
  nvmlValue_t value;
};

/* "cuda/bindings/cy_nvml.pxd":1471
 *     nvmlValue_t value
 * 
 * ctypedef struct nvmlGpuThermalSettings_t 'nvmlGpuThermalSettings_t':             # <<<<<<<<<<<<<<
 *     unsigned int count
 *     _anon_pod0 sensor[3]
*/
/* Mirror of NVML's 'nvmlGpuThermalSettings_t'. '_anon_pod0' is Cython's generated
   name for the anonymous sensor struct in the NVML header. */
struct nvmlGpuThermalSettings_t {
  unsigned int count;
  _anon_pod0 sensor[3];
};

/* "cuda/bindings/cy_nvml.pxd":1475
 *     _anon_pod0 sensor[3]
 * 
 * ctypedef struct nvmlUUID_v1_t 'nvmlUUID_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int type
*/
/* Mirror of NVML's 'nvmlUUID_v1_t'; layout matches the NVML header ABI. */
struct nvmlUUID_v1_t {
  unsigned int version;
  unsigned int type;
  nvmlUUIDValue_t value;
};

/* "cuda/bindings/cy_nvml.pxd":1480
 *     nvmlUUIDValue_t value
 * 
 * ctypedef struct nvmlClkMonStatus_t 'nvmlClkMonStatus_t':             # <<<<<<<<<<<<<<
 *     unsigned int bGlobalStatus
 *     unsigned int clkMonListSize
*/
/* Mirror of NVML's 'nvmlClkMonStatus_t'; layout matches the NVML header ABI. */
struct nvmlClkMonStatus_t {
  unsigned int bGlobalStatus;
  unsigned int clkMonListSize;  /* number of valid entries in clkMonList */
  nvmlClkMonFaultInfo_t clkMonList[32];
};

/* "cuda/bindings/cy_nvml.pxd":1485
 *     nvmlClkMonFaultInfo_t clkMonList[32]
 * 
 * ctypedef struct nvmlProcessesUtilizationInfo_v1_t 'nvmlProcessesUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int processSamplesCount
*/
/* Mirror of NVML's 'nvmlProcessesUtilizationInfo_v1_t'; layout matches the NVML header ABI. */
struct nvmlProcessesUtilizationInfo_v1_t {
  unsigned int version;
  unsigned int processSamplesCount;
  unsigned PY_LONG_LONG lastSeenTimeStamp;
  nvmlProcessUtilizationInfo_v1_t *procUtilArray;  /* caller-managed buffer of processSamplesCount entries */
};

/* "cuda/bindings/cy_nvml.pxd":1491
 *     nvmlProcessUtilizationInfo_v1_t* procUtilArray
 * 
 * ctypedef struct nvmlGpuDynamicPstatesInfo_t 'nvmlGpuDynamicPstatesInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned int flags
 *     _anon_pod1 utilization[8]
*/
/* Mirror of NVML's 'nvmlGpuDynamicPstatesInfo_t'. '_anon_pod1' is Cython's generated
   name for the anonymous utilization struct in the NVML header. */
struct nvmlGpuDynamicPstatesInfo_t {
  unsigned int flags;
  _anon_pod1 utilization[8];
};

/* "cuda/bindings/cy_nvml.pxd":1495
 *     _anon_pod1 utilization[8]
 * 
 * ctypedef union nvmlVgpuSchedulerParams_t 'nvmlVgpuSchedulerParams_t':             # <<<<<<<<<<<<<<
 *     _anon_pod2 vgpuSchedDataWithARR
 *     _anon_pod3 vgpuSchedData
*/
/* Mirror of NVML's union 'nvmlVgpuSchedulerParams_t': the active member depends on
   whether adaptive round-robin (ARR) scheduling is in use. */
union nvmlVgpuSchedulerParams_t {
  _anon_pod2 vgpuSchedDataWithARR;
  _anon_pod3 vgpuSchedData;
};

/* "cuda/bindings/cy_nvml.pxd":1499
 *     _anon_pod3 vgpuSchedData
 * 
 * ctypedef union nvmlVgpuSchedulerSetParams_t 'nvmlVgpuSchedulerSetParams_t':             # <<<<<<<<<<<<<<
 *     _anon_pod4 vgpuSchedDataWithARR
 *     _anon_pod5 vgpuSchedData
*/
/* Mirror of NVML's union 'nvmlVgpuSchedulerSetParams_t'; set-side counterpart of
   nvmlVgpuSchedulerParams_t above. */
union nvmlVgpuSchedulerSetParams_t {
  _anon_pod4 vgpuSchedDataWithARR;
  _anon_pod5 vgpuSchedData;
};

/* "cuda/bindings/cy_nvml.pxd":1503
 *     _anon_pod5 vgpuSchedData
 * 
 * ctypedef struct nvmlVgpuLicenseInfo_t 'nvmlVgpuLicenseInfo_t':             # <<<<<<<<<<<<<<
 *     unsigned char isLicensed
 *     nvmlVgpuLicenseExpiry_t licenseExpiry
*/
/* Mirror of NVML's 'nvmlVgpuLicenseInfo_t'; layout matches the NVML header ABI. */
struct nvmlVgpuLicenseInfo_t {
  unsigned char isLicensed;
  nvmlVgpuLicenseExpiry_t licenseExpiry;
  unsigned int currentState;
};

/* "cuda/bindings/cy_nvml.pxd":1508
 *     unsigned int currentState
 * 
 * ctypedef struct nvmlGridLicensableFeature_t 'nvmlGridLicensableFeature_t':             # <<<<<<<<<<<<<<
 *     nvmlGridLicenseFeatureCode_t featureCode
 *     unsigned int featureState
*/
/* Mirror of NVML's 'nvmlGridLicensableFeature_t'; layout matches the NVML header ABI. */
struct nvmlGridLicensableFeature_t {
  nvmlGridLicenseFeatureCode_t featureCode;
  unsigned int featureState;
  char licenseInfo[128];
  char productName[128];
  unsigned int featureEnabled;
  nvmlGridLicenseExpiry_t licenseExpiry;
};

/* "cuda/bindings/cy_nvml.pxd":1516
 *     nvmlGridLicenseExpiry_t licenseExpiry
 * 
 * ctypedef struct nvmlUnitFanSpeeds_t 'nvmlUnitFanSpeeds_t':             # <<<<<<<<<<<<<<
 *     nvmlUnitFanInfo_t fans[24]
 *     unsigned int count
*/
/* Mirror of NVML's 'nvmlUnitFanSpeeds_t'; note the array precedes the count in this ABI. */
struct nvmlUnitFanSpeeds_t {
  nvmlUnitFanInfo_t fans[24];
  unsigned int count;
};

/* "cuda/bindings/cy_nvml.pxd":1520
 *     unsigned int count
 * 
 * ctypedef struct nvmlSystemEventSetWaitRequest_v1_t 'nvmlSystemEventSetWaitRequest_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int timeoutms
*/
/* Mirror of NVML's 'nvmlSystemEventSetWaitRequest_v1_t'; layout matches the NVML header ABI. */
struct nvmlSystemEventSetWaitRequest_v1_t {
  unsigned int version;
  unsigned int timeoutms;  /* wait timeout in milliseconds, per the field name */
  nvmlSystemEventSet_t set;
  nvmlSystemEventData_v1_t *data;  /* caller-provided output buffer */
  unsigned int dataSize;
  unsigned int numEvent;
};

/* "cuda/bindings/cy_nvml.pxd":1528
 *     unsigned int numEvent
 * 
 * ctypedef struct nvmlVgpuPgpuMetadata_t 'nvmlVgpuPgpuMetadata_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int revision
*/
/* Mirror of NVML's 'nvmlVgpuPgpuMetadata_t'; layout matches the NVML header ABI. */
struct nvmlVgpuPgpuMetadata_t {
  unsigned int version;
  unsigned int revision;
  char hostDriverVersion[80];
  unsigned int pgpuVirtualizationCaps;
  unsigned int reserved[5];
  nvmlVgpuVersion_t hostSupportedVgpuRange;
  unsigned int opaqueDataSize;
  char opaqueData[4];  /* variable-length trailer; actual size given by opaqueDataSize */
};

/* "cuda/bindings/cy_nvml.pxd":1538
 *     char opaqueData[4]
 * 
 * ctypedef struct nvmlGpuInstanceInfo_t 'nvmlGpuInstanceInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlDevice_t device
 *     unsigned int id
*/
/* Mirror of NVML's 'nvmlGpuInstanceInfo_t' (MIG GPU instance); layout matches the NVML header ABI. */
struct nvmlGpuInstanceInfo_t {
  nvmlDevice_t device;
  unsigned int id;
  unsigned int profileId;
  nvmlGpuInstancePlacement_t placement;
};

/* "cuda/bindings/cy_nvml.pxd":1544
 *     nvmlGpuInstancePlacement_t placement
 * 
 * ctypedef struct nvmlComputeInstanceInfo_t 'nvmlComputeInstanceInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlDevice_t device
 *     nvmlGpuInstance_t gpuInstance
*/
/* Mirror of NVML's 'nvmlComputeInstanceInfo_t' (MIG compute instance); layout matches the NVML header ABI. */
struct nvmlComputeInstanceInfo_t {
  nvmlDevice_t device;
  nvmlGpuInstance_t gpuInstance;
  unsigned int id;
  unsigned int profileId;
  nvmlComputeInstancePlacement_t placement;
};

/* "cuda/bindings/cy_nvml.pxd":1551
 *     nvmlComputeInstancePlacement_t placement
 * 
 * ctypedef struct nvmlGpmMetric_t 'nvmlGpmMetric_t':             # <<<<<<<<<<<<<<
 *     unsigned int metricId
 *     nvmlReturn_t nvmlReturn
*/
/* Mirror of NVML's 'nvmlGpmMetric_t'. '_anon_pod6' is Cython's generated name for
   the anonymous metricInfo struct in the NVML header. */
struct nvmlGpmMetric_t {
  unsigned int metricId;
  nvmlReturn_t nvmlReturn;  /* per-metric status code */
  double value;
  _anon_pod6 metricInfo;
};

/* "cuda/bindings/cy_nvml.pxd":1557
 *     _anon_pod6 metricInfo
 * 
 * ctypedef struct nvmlWorkloadPowerProfileInfo_v1_t 'nvmlWorkloadPowerProfileInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int profileId
*/
/* Mirror of NVML's 'nvmlWorkloadPowerProfileInfo_v1_t'; layout matches the NVML header ABI. */
struct nvmlWorkloadPowerProfileInfo_v1_t {
  unsigned int version;
  unsigned int profileId;
  unsigned int priority;
  nvmlMask255_t conflictingMask;
};

/* "cuda/bindings/cy_nvml.pxd":1563
 *     nvmlMask255_t conflictingMask
 * 
 * ctypedef struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t 'nvmlWorkloadPowerProfileCurrentProfiles_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlMask255_t perfProfilesMask
*/
/* Mirror of NVML's 'nvmlWorkloadPowerProfileCurrentProfiles_v1_t'; layout matches the NVML header ABI. */
struct nvmlWorkloadPowerProfileCurrentProfiles_v1_t {
  unsigned int version;
  nvmlMask255_t perfProfilesMask;
  nvmlMask255_t requestedProfilesMask;
  nvmlMask255_t enforcedProfilesMask;
};

/* "cuda/bindings/cy_nvml.pxd":1569
 *     nvmlMask255_t enforcedProfilesMask
 * 
 * ctypedef struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t 'nvmlWorkloadPowerProfileRequestedProfiles_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlMask255_t requestedProfilesMask
*/
/* Mirror of NVML's 'nvmlWorkloadPowerProfileRequestedProfiles_v1_t'; layout matches the NVML header ABI. */
struct nvmlWorkloadPowerProfileRequestedProfiles_v1_t {
  unsigned int version;
  nvmlMask255_t requestedProfilesMask;
};

/* "cuda/bindings/cy_nvml.pxd":1573
 *     nvmlMask255_t requestedProfilesMask
 * 
 * ctypedef struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t 'nvmlEccSramUniqueUncorrectedErrorCounts_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int entryCount
*/
/* Mirror of NVML's 'nvmlEccSramUniqueUncorrectedErrorCounts_v1_t'; layout matches the NVML header ABI. */
struct nvmlEccSramUniqueUncorrectedErrorCounts_v1_t {
  unsigned int version;
  unsigned int entryCount;  /* number of entries pointed to by 'entries' */
  nvmlEccSramUniqueUncorrectedErrorEntry_v1_t *entries;
};

/* "cuda/bindings/cy_nvml.pxd":1578
 *     nvmlEccSramUniqueUncorrectedErrorEntry_v1_t* entries
 * 
 * ctypedef struct nvmlNvlinkFirmwareInfo_t 'nvmlNvlinkFirmwareInfo_t':             # <<<<<<<<<<<<<<
 *     nvmlNvlinkFirmwareVersion_t firmwareVersion[100]
 *     unsigned int numValidEntries
*/
/* Mirror of NVML's 'nvmlNvlinkFirmwareInfo_t'; numValidEntries bounds the used slots in firmwareVersion. */
struct nvmlNvlinkFirmwareInfo_t {
  nvmlNvlinkFirmwareVersion_t firmwareVersion[100];
  unsigned int numValidEntries;
};

/* "cuda/bindings/cy_nvml.pxd":1582
 *     unsigned int numValidEntries
 * 
 * ctypedef struct nvmlPRMTLV_v1_t 'nvmlPRMTLV_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned dataSize
 *     unsigned status
*/
/* Mirror of NVML's 'nvmlPRMTLV_v1_t'. '_anon_pod_member0' is Cython's generated name
   for an anonymous member in the NVML header. */
struct nvmlPRMTLV_v1_t {
  unsigned int dataSize;
  unsigned int status;
  _anon_pod7 _anon_pod_member0;
};

/* "cuda/bindings/cy_nvml.pxd":1587
 *     _anon_pod7 _anon_pod_member0
 * 
 * ctypedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t 'nvmlVgpuTypeIdInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'
*/
typedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1588
 * 
 * ctypedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t 'nvmlVgpuTypeIdInfo_t'
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'
 * ctypedef struct nvmlVgpuProcessesUtilizationInfo_v1_t 'nvmlVgpuProcessesUtilizationInfo_v1_t':
*/
typedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1589
 * ctypedef nvmlVgpuTypeIdInfo_v1_t nvmlVgpuTypeIdInfo_t 'nvmlVgpuTypeIdInfo_t'
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuProcessesUtilizationInfo_v1_t 'nvmlVgpuProcessesUtilizationInfo_v1_t':
 *     unsigned int version
*/
typedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1590
 * ctypedef nvmlVgpuTypeMaxInstance_v1_t nvmlVgpuTypeMaxInstance_t 'nvmlVgpuTypeMaxInstance_t'
 * ctypedef nvmlVgpuCreatablePlacementInfo_v1_t nvmlVgpuCreatablePlacementInfo_t 'nvmlVgpuCreatablePlacementInfo_t'
 * ctypedef struct nvmlVgpuProcessesUtilizationInfo_v1_t 'nvmlVgpuProcessesUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int vgpuProcessCount
*/
/* Mirror of NVML's 'nvmlVgpuProcessesUtilizationInfo_v1_t'; layout matches the NVML header ABI. */
struct nvmlVgpuProcessesUtilizationInfo_v1_t {
  unsigned int version;
  unsigned int vgpuProcessCount;
  unsigned PY_LONG_LONG lastSeenTimeStamp;
  nvmlVgpuProcessUtilizationInfo_v1_t *vgpuProcUtilArray;  /* caller-managed buffer of vgpuProcessCount entries */
};

/* "cuda/bindings/cy_nvml.pxd":1596
 *     nvmlVgpuProcessUtilizationInfo_v1_t* vgpuProcUtilArray
 * 
 * ctypedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t 'nvmlActiveVgpuInstanceInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
*/
typedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1597
 * 
 * ctypedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t 'nvmlActiveVgpuInstanceInfo_t'
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
*/
typedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t;  /* versioned-API alias: currently tracks v3 */

/* "cuda/bindings/cy_nvml.pxd":1598
 * ctypedef nvmlActiveVgpuInstanceInfo_v1_t nvmlActiveVgpuInstanceInfo_t 'nvmlActiveVgpuInstanceInfo_t'
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
*/
typedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1599
 * ctypedef nvmlGpuFabricInfo_v3_t nvmlGpuFabricInfoV_t 'nvmlGpuFabricInfoV_t'
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'
*/
typedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1600
 * ctypedef nvmlSystemEventSetCreateRequest_v1_t nvmlSystemEventSetCreateRequest_t 'nvmlSystemEventSetCreateRequest_t'
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'
 * ctypedef struct nvmlVgpuInstancesUtilizationInfo_v1_t 'nvmlVgpuInstancesUtilizationInfo_v1_t':
*/
typedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1601
 * ctypedef nvmlSystemEventSetFreeRequest_v1_t nvmlSystemEventSetFreeRequest_t 'nvmlSystemEventSetFreeRequest_t'
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuInstancesUtilizationInfo_v1_t 'nvmlVgpuInstancesUtilizationInfo_v1_t':
 *     unsigned int version
*/
typedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1602
 * ctypedef nvmlSystemRegisterEventRequest_v1_t nvmlSystemRegisterEventRequest_t 'nvmlSystemRegisterEventRequest_t'
 * ctypedef nvmlProcessDetailList_v1_t nvmlProcessDetailList_t 'nvmlProcessDetailList_t'
 * ctypedef struct nvmlVgpuInstancesUtilizationInfo_v1_t 'nvmlVgpuInstancesUtilizationInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlValueType_t sampleValType
*/
/* Mirror of NVML's 'nvmlVgpuInstancesUtilizationInfo_v1_t'; layout matches the NVML header ABI. */
struct nvmlVgpuInstancesUtilizationInfo_v1_t {
  unsigned int version;
  nvmlValueType_t sampleValType;  /* value type used by entries in vgpuUtilArray */
  unsigned int vgpuInstanceCount;
  unsigned PY_LONG_LONG lastSeenTimeStamp;
  nvmlVgpuInstanceUtilizationInfo_v1_t *vgpuUtilArray;  /* caller-managed buffer of vgpuInstanceCount entries */
};

/* "cuda/bindings/cy_nvml.pxd":1609
 *     nvmlVgpuInstanceUtilizationInfo_v1_t* vgpuUtilArray
 * 
 * ctypedef nvmlUUID_v1_t nvmlUUID_t 'nvmlUUID_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t 'nvmlProcessesUtilizationInfo_t'
 * ctypedef struct nvmlVgpuSchedulerLog_t 'nvmlVgpuSchedulerLog_t':
*/
typedef nvmlUUID_v1_t nvmlUUID_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1610
 * 
 * ctypedef nvmlUUID_v1_t nvmlUUID_t 'nvmlUUID_t'
 * ctypedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t 'nvmlProcessesUtilizationInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlVgpuSchedulerLog_t 'nvmlVgpuSchedulerLog_t':
 *     unsigned int engineId
*/
typedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1611
 * ctypedef nvmlUUID_v1_t nvmlUUID_t 'nvmlUUID_t'
 * ctypedef nvmlProcessesUtilizationInfo_v1_t nvmlProcessesUtilizationInfo_t 'nvmlProcessesUtilizationInfo_t'
 * ctypedef struct nvmlVgpuSchedulerLog_t 'nvmlVgpuSchedulerLog_t':             # <<<<<<<<<<<<<<
 *     unsigned int engineId
 *     unsigned int schedulerPolicy
*/
/* Mirror of NVML's 'nvmlVgpuSchedulerLog_t'; layout matches the NVML header ABI. */
struct nvmlVgpuSchedulerLog_t {
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int arrMode;  /* whether adaptive round-robin mode is active; selects the union member below */
  nvmlVgpuSchedulerParams_t schedulerParams;
  unsigned int entriesCount;  /* number of valid entries in logEntries */
  nvmlVgpuSchedulerLogEntry_t logEntries[200];
};

/* "cuda/bindings/cy_nvml.pxd":1619
 *     nvmlVgpuSchedulerLogEntry_t logEntries[200]
 * 
 * ctypedef struct nvmlVgpuSchedulerGetState_t 'nvmlVgpuSchedulerGetState_t':             # <<<<<<<<<<<<<<
 *     unsigned int schedulerPolicy
 *     unsigned int arrMode
*/
/* Mirror of NVML's 'nvmlVgpuSchedulerGetState_t'; layout matches the NVML header ABI. */
struct nvmlVgpuSchedulerGetState_t {
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
};

/* "cuda/bindings/cy_nvml.pxd":1624
 *     nvmlVgpuSchedulerParams_t schedulerParams
 * 
 * ctypedef struct nvmlVgpuSchedulerStateInfo_v1_t 'nvmlVgpuSchedulerStateInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int engineId
*/
/* Mirror of NVML's 'nvmlVgpuSchedulerStateInfo_v1_t'; versioned variant of the get-state struct. */
struct nvmlVgpuSchedulerStateInfo_v1_t {
  unsigned int version;
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
};

/* "cuda/bindings/cy_nvml.pxd":1631
 *     nvmlVgpuSchedulerParams_t schedulerParams
 * 
 * ctypedef struct nvmlVgpuSchedulerLogInfo_v1_t 'nvmlVgpuSchedulerLogInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int engineId
*/
/* Mirror of NVML's 'nvmlVgpuSchedulerLogInfo_v1_t'; versioned variant of the scheduler-log struct. */
struct nvmlVgpuSchedulerLogInfo_v1_t {
  unsigned int version;
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int arrMode;
  nvmlVgpuSchedulerParams_t schedulerParams;
  unsigned int entriesCount;  /* number of valid entries in logEntries */
  nvmlVgpuSchedulerLogEntry_t logEntries[200];
};

/* "cuda/bindings/cy_nvml.pxd":1640
 *     nvmlVgpuSchedulerLogEntry_t logEntries[200]
 * 
 * ctypedef struct nvmlVgpuSchedulerSetState_t 'nvmlVgpuSchedulerSetState_t':             # <<<<<<<<<<<<<<
 *     unsigned int schedulerPolicy
 *     unsigned int enableARRMode
*/
/* Mirror of NVML's 'nvmlVgpuSchedulerSetState_t'; set-side counterpart using the Set params union. */
struct nvmlVgpuSchedulerSetState_t {
  unsigned int schedulerPolicy;
  unsigned int enableARRMode;
  nvmlVgpuSchedulerSetParams_t schedulerParams;
};

/* "cuda/bindings/cy_nvml.pxd":1645
 *     nvmlVgpuSchedulerSetParams_t schedulerParams
 * 
 * ctypedef struct nvmlVgpuSchedulerState_v1_t 'nvmlVgpuSchedulerState_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int engineId
*/
/* Mirror of NVML's 'nvmlVgpuSchedulerState_v1_t'; versioned variant of the set-state struct. */
struct nvmlVgpuSchedulerState_v1_t {
  unsigned int version;
  unsigned int engineId;
  unsigned int schedulerPolicy;
  unsigned int enableARRMode;
  nvmlVgpuSchedulerSetParams_t schedulerParams;
};

/* "cuda/bindings/cy_nvml.pxd":1652
 *     nvmlVgpuSchedulerSetParams_t schedulerParams
 * 
 * ctypedef struct nvmlGridLicensableFeatures_t 'nvmlGridLicensableFeatures_t':             # <<<<<<<<<<<<<<
 *     int isGridLicenseSupported
 *     unsigned int licensableFeaturesCount
*/
/* Mirror of NVML's 'nvmlGridLicensableFeatures_t'; layout matches the NVML header ABI. */
struct nvmlGridLicensableFeatures_t {
  int isGridLicenseSupported;
  unsigned int licensableFeaturesCount;  /* number of valid entries in gridLicensableFeatures */
  nvmlGridLicensableFeature_t gridLicensableFeatures[3];
};

/* "cuda/bindings/cy_nvml.pxd":1657
 *     nvmlGridLicensableFeature_t gridLicensableFeatures[3]
 * 
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_t 'nvmlSystemEventSetWaitRequest_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlGpmMetricsGet_t 'nvmlGpmMetricsGet_t':
 *     unsigned int version
*/
typedef nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1658
 * 
 * ctypedef nvmlSystemEventSetWaitRequest_v1_t nvmlSystemEventSetWaitRequest_t 'nvmlSystemEventSetWaitRequest_t'
 * ctypedef struct nvmlGpmMetricsGet_t 'nvmlGpmMetricsGet_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int numMetrics
*/
/* Mirror of NVML's 'nvmlGpmMetricsGet_t': request/response for GPM metric computation
   between two samples. Layout matches the NVML header ABI. */
struct nvmlGpmMetricsGet_t {
  unsigned int version;
  unsigned int numMetrics;  /* number of valid entries in metrics */
  nvmlGpmSample_t sample1;
  nvmlGpmSample_t sample2;
  nvmlGpmMetric_t metrics[210];
};

/* "cuda/bindings/cy_nvml.pxd":1665
 *     nvmlGpmMetric_t metrics[210]
 * 
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t 'nvmlWorkloadPowerProfileInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
*/
typedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1666
 * 
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t 'nvmlWorkloadPowerProfileInfo_t'
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'
*/
typedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1667
 * ctypedef nvmlWorkloadPowerProfileInfo_v1_t nvmlWorkloadPowerProfileInfo_t 'nvmlWorkloadPowerProfileInfo_t'
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'
 * ctypedef struct nvmlNvLinkInfo_v2_t 'nvmlNvLinkInfo_v2_t':
*/
typedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1668
 * ctypedef nvmlWorkloadPowerProfileCurrentProfiles_v1_t nvmlWorkloadPowerProfileCurrentProfiles_t 'nvmlWorkloadPowerProfileCurrentProfiles_t'
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlNvLinkInfo_v2_t 'nvmlNvLinkInfo_v2_t':
 *     unsigned int version
*/
typedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1669
 * ctypedef nvmlWorkloadPowerProfileRequestedProfiles_v1_t nvmlWorkloadPowerProfileRequestedProfiles_t 'nvmlWorkloadPowerProfileRequestedProfiles_t'
 * ctypedef nvmlEccSramUniqueUncorrectedErrorCounts_v1_t nvmlEccSramUniqueUncorrectedErrorCounts_t 'nvmlEccSramUniqueUncorrectedErrorCounts_t'
 * ctypedef struct nvmlNvLinkInfo_v2_t 'nvmlNvLinkInfo_v2_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     unsigned int isNvleEnabled
*/
/* Mirror of NVML's 'nvmlNvLinkInfo_v2_t'; layout matches the NVML header ABI. */
struct nvmlNvLinkInfo_v2_t {
  unsigned int version;
  unsigned int isNvleEnabled;
  nvmlNvlinkFirmwareInfo_t firmwareInfo;
};

/* "cuda/bindings/cy_nvml.pxd":1674
 *     nvmlNvlinkFirmwareInfo_t firmwareInfo
 * 
 * ctypedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t 'nvmlVgpuProcessesUtilizationInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
*/
typedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1675
 * 
 * ctypedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t 'nvmlVgpuProcessesUtilizationInfo_t'
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
*/
typedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1676
 * ctypedef nvmlVgpuProcessesUtilizationInfo_v1_t nvmlVgpuProcessesUtilizationInfo_t 'nvmlVgpuProcessesUtilizationInfo_t'
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'
*/
typedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1677
 * ctypedef nvmlVgpuInstancesUtilizationInfo_v1_t nvmlVgpuInstancesUtilizationInfo_t 'nvmlVgpuInstancesUtilizationInfo_t'
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'
 * ctypedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t 'nvmlWorkloadPowerProfileProfilesInfo_v1_t':
*/
typedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1678
 * ctypedef nvmlVgpuSchedulerStateInfo_v1_t nvmlVgpuSchedulerStateInfo_t 'nvmlVgpuSchedulerStateInfo_t'
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'             # <<<<<<<<<<<<<<
 * ctypedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t 'nvmlWorkloadPowerProfileProfilesInfo_v1_t':
 *     unsigned int version
*/
typedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t;  /* unversioned alias for the v1 NVML type */

/* "cuda/bindings/cy_nvml.pxd":1679
 * ctypedef nvmlVgpuSchedulerLogInfo_v1_t nvmlVgpuSchedulerLogInfo_t 'nvmlVgpuSchedulerLogInfo_t'
 * ctypedef nvmlVgpuSchedulerState_v1_t nvmlVgpuSchedulerState_t 'nvmlVgpuSchedulerState_t'
 * ctypedef struct nvmlWorkloadPowerProfileProfilesInfo_v1_t 'nvmlWorkloadPowerProfileProfilesInfo_v1_t':             # <<<<<<<<<<<<<<
 *     unsigned int version
 *     nvmlMask255_t perfProfilesMask
*/
/* Mirror of NVML's 'nvmlWorkloadPowerProfileProfilesInfo_v1_t'; layout matches the NVML header ABI. */
struct nvmlWorkloadPowerProfileProfilesInfo_v1_t {
  unsigned int version;
  nvmlMask255_t perfProfilesMask;
  nvmlWorkloadPowerProfileInfo_t perfProfile[255];
};

/* "cuda/bindings/cy_nvml.pxd":1684
 *     nvmlWorkloadPowerProfileInfo_t perfProfile[255]
 * 
 * ctypedef nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_t 'nvmlNvLinkInfo_t'             # <<<<<<<<<<<<<<
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_t 'nvmlWorkloadPowerProfileProfilesInfo_t'
 * 
*/
typedef nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_t;  /* versioned-API alias: currently tracks v2 */

/* "cuda/bindings/cy_nvml.pxd":1685
 * 
 * ctypedef nvmlNvLinkInfo_v2_t nvmlNvLinkInfo_t 'nvmlNvLinkInfo_t'
 * ctypedef nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_t 'nvmlWorkloadPowerProfileProfilesInfo_t'             # <<<<<<<<<<<<<<
 * 
 * 
*/
typedef nvmlWorkloadPowerProfileProfilesInfo_v1_t nvmlWorkloadPowerProfileProfilesInfo_t;  /* unversioned alias for the v1 NVML type */
/* #### Code section: utility_code_proto ### */

/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
  #define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
  typedef struct {
    void (*INCREF)(void*, PyObject*, Py_ssize_t);
    void (*DECREF)(void*, PyObject*, Py_ssize_t);
    void (*GOTREF)(void*, PyObject*, Py_ssize_t);
    void (*GIVEREF)(void*, PyObject*, Py_ssize_t);
    void* (*SetupContext)(const char*, Py_ssize_t, const char*);
    void (*FinishContext)(void**);
  } __Pyx_RefNannyAPIStruct;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
          if (acquire_gil) {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
              PyGILState_Release(__pyx_gilstate_save);\
          } else {\
              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
          }
  #define __Pyx_RefNannyFinishContextNogil() {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __Pyx_RefNannyFinishContext();\
              PyGILState_Release(__pyx_gilstate_save);\
          }
  #define __Pyx_RefNannyFinishContextNogil() {\
              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
              __Pyx_RefNannyFinishContext();\
              PyGILState_Release(__pyx_gilstate_save);\
          }
  #define __Pyx_RefNannyFinishContext()\
          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
  #define __Pyx_INCREF(r)  __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_DECREF(r)  __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_GOTREF(r)  __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
  #define __Pyx_XINCREF(r)  do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0)
  #define __Pyx_XDECREF(r)  do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0)
  #define __Pyx_XGOTREF(r)  do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0)
  #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0)
#else
  /* Refnanny disabled: tracing macros collapse to plain CPython refcount
     calls or to nothing. */
  #define __Pyx_RefNannyDeclarations
  #define __Pyx_RefNannySetupContext(name, acquire_gil)
  #define __Pyx_RefNannyFinishContextNogil()
  #define __Pyx_RefNannyFinishContext()
  #define __Pyx_INCREF(r) Py_INCREF(r)
  #define __Pyx_DECREF(r) Py_DECREF(r)
  #define __Pyx_GOTREF(r)
  #define __Pyx_GIVEREF(r)
  #define __Pyx_XINCREF(r) Py_XINCREF(r)
  #define __Pyx_XDECREF(r) Py_XDECREF(r)
  #define __Pyx_XGOTREF(r)
  #define __Pyx_XGIVEREF(r)
#endif
/* Replace-and-release helpers: the new value is stored into the slot before
   the old reference is dropped, so the slot never holds a freed pointer. */
#define __Pyx_Py_XDECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; Py_XDECREF(tmp);\
    } while (0)
#define __Pyx_XDECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_XDECREF(tmp);\
    } while (0)
#define __Pyx_DECREF_SET(r, v) do {\
        PyObject *tmp = (PyObject *) r;\
        r = v; __Pyx_DECREF(tmp);\
    } while (0)
/* Clear helpers: NULL the slot first, then drop the reference. */
#define __Pyx_CLEAR(r)    do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r)   do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)

/* ErrOccurredWithGIL.proto */
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void);

/* FunctionExport.proto */
/* Publishes a C function pointer (with its signature string) in api_dict. */
static int __Pyx_ExportFunction(PyObject *api_dict, const char *name, void (*f)(void), const char *sig);

/* GetApiDict.proto */
static PyObject *__Pyx_ApiExport_GetApiDict(void);

/* IncludeStringH.proto */
#include <string.h>

/* FunctionImport.proto */
/* Resolves a C function pointer exported by another Cython module after
   checking its signature string; used to fill the _nvml slots below. */
static int __Pyx_ImportFunction_3_2_2(PyObject *module, const char *funcname, void (**f)(void), const char *sig);

/* dict_setdefault.proto (used by CLineInTraceback) */
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value);

/* PyDictVersioning.proto (used by CLineInTraceback) */
/* Caches a dict lookup keyed on CPython's ma_version_tag: the cached value
   is reused while the dict's version is unchanged. Only active when dict
   versions and type slots are available; otherwise lookups are uncached. */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT  ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict)  (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
    (version_var) = __PYX_GET_DICT_VERSION(dict);\
    (cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
    static PY_UINT64_T __pyx_dict_version = 0;\
    static PyObject *__pyx_dict_cached_value = NULL;\
    if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
        (VAR) = __Pyx_XNewRef(__pyx_dict_cached_value);\
    } else {\
        (VAR) = __pyx_dict_cached_value = (LOOKUP);\
        __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
    }\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict)  (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP)  (VAR) = (LOOKUP);
#endif

/* PyErrExceptionMatches.proto (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err)  PyErr_ExceptionMatches(err)
#endif

/* PyThreadStateGet.proto (used by PyErrFetchRestore) */
/* Fast-path variants read the thread state once into a local __pyx_tstate;
   the exception field changed name/shape in CPython 3.12 (0x030C00A6),
   hence the nested version check. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare  PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign  __pyx_tstate = __Pyx_PyThreadState_Current;
#if PY_VERSION_HEX >= 0x030C00A6
#define __Pyx_PyErr_Occurred()  (__pyx_tstate->current_exception != NULL)
#define __Pyx_PyErr_CurrentExceptionType()  (__pyx_tstate->current_exception ? (PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL)
#else
#define __Pyx_PyErr_Occurred()  (__pyx_tstate->curexc_type != NULL)
#define __Pyx_PyErr_CurrentExceptionType()  (__pyx_tstate->curexc_type)
#endif
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred()  (PyErr_Occurred() != NULL)
#define __Pyx_PyErr_CurrentExceptionType()  PyErr_Occurred()
#endif

/* PyErrFetchRestore.proto (used by PyObjectGetAttrStrNoError) */
/* Fetch/restore of the pending exception; the fast path works directly on
   a PyThreadState instead of going through PyErr_Fetch/PyErr_Restore. */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb)  __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)    __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)    __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb)  PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb)  PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb)  PyErr_Fetch(type, value, tb)
#endif

/* PyObjectGetAttrStr.proto (used by PyObjectGetAttrStrNoError) */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif

/* PyObjectGetAttrStrNoError.proto (used by CLineInTraceback) */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);

/* CLineInTraceback.proto (used by AddTraceback) */
/* Decides which C source line (if any) to report in Python tracebacks;
   when not decided at runtime, it is a compile-time constant. */
#if CYTHON_CLINE_IN_TRACEBACK && CYTHON_CLINE_IN_TRACEBACK_RUNTIME
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#else
#define __Pyx_CLineForTraceback(tstate, c_line)  (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#endif

/* CodeObjectCache.proto (used by AddTraceback) */
/* Per-module cache of code objects keyed by code_line, looked up via
   __pyx_bisect_code_objects; under the Limited API code objects are plain
   PyObject pointers. The accessor_count field exists only on free-threaded
   CPython builds. */
#if CYTHON_COMPILING_IN_LIMITED_API
typedef PyObject __Pyx_CachedCodeObjectType;
#else
typedef PyCodeObject __Pyx_CachedCodeObjectType;
#endif
typedef struct {
    __Pyx_CachedCodeObjectType* code_object;
    int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
    int count;
    int max_count;
    __Pyx_CodeObjectCacheEntry* entries;
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_atomic_int_type accessor_count;
  #endif
};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static __Pyx_CachedCodeObjectType *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, __Pyx_CachedCodeObjectType* code_object);

/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename);

/* FormatTypeName.proto */
/* Abstraction over "type name for error messages": a new-reference PyObject
   (formatted with %U, released via __Pyx_DECREF_TypeName) under the Limited
   API, otherwise a borrowed tp_name C string formatted with %.200s. */
#if CYTHON_COMPILING_IN_LIMITED_API
typedef PyObject *__Pyx_TypeName;
#define __Pyx_FMT_TYPENAME "%U"
#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj)
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
#define __Pyx_PyType_GetFullyQualifiedName PyType_GetFullyQualifiedName
#else
static __Pyx_TypeName __Pyx_PyType_GetFullyQualifiedName(PyTypeObject* tp);
#endif
#else  // !LIMITED_API
typedef const char *__Pyx_TypeName;
#define __Pyx_FMT_TYPENAME "%.200s"
#define __Pyx_PyType_GetFullyQualifiedName(tp) ((tp)->tp_name)
#define __Pyx_DECREF_TypeName(obj)
#endif

/* GCCDiagnostics.proto (used by CIntToPy) */
/* GCC >= 4.6 (and clang, which defines __GNUC__) support #pragma GCC
   diagnostic push/pop; Intel's compiler is excluded. */
#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif

/* PyObjectCall.proto (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif

/* PyObjectCallMethO.proto (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif

/* PyObjectFastCall.proto (used by PyObjectVectorCallKwBuilder) */
#define __Pyx_PyObject_FastCall(func, args, nargs)  __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL)
static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject * const*args, size_t nargs, PyObject *kwargs);

/* PyObjectVectorCallKwBuilder.proto (used by CIntToPy) */
/* Builds keyword arguments for a call: as a names tuple when the vectorcall
   protocol is available (PyObject_Vectorcall is public from 3.9), otherwise
   as a plain dict passed to __Pyx_PyObject_FastCallDict. */
CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n);
#if CYTHON_VECTORCALL
#if PY_VERSION_HEX >= 0x03090000
#define __Pyx_Object_Vectorcall_CallFromBuilder PyObject_Vectorcall
#else
#define __Pyx_Object_Vectorcall_CallFromBuilder _PyObject_Vectorcall
#endif
#define __Pyx_MakeVectorcallBuilderKwds(n) PyTuple_New(n)
static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n);
static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n);
#else
#define __Pyx_Object_Vectorcall_CallFromBuilder __Pyx_PyObject_FastCallDict
#define __Pyx_MakeVectorcallBuilderKwds(n) __Pyx_PyDict_NewPresized(n)
#define __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n) PyDict_SetItem(builder, key, value)
#define __Pyx_VectorcallBuilder_AddArgStr(key, value, builder, args, n) PyDict_SetItemString(builder, key, value)
#endif

/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_long(long value);

/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyLong_As_long(PyObject *);

/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyLong_As_int(PyObject *);

/* FastTypeChecks.proto */
/* Subtype/exception-match checks that bypass PyObject_TypeCheck on CPython;
   on other implementations they fall back to the public C API. */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2))
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
/* Fallback: matches err against either exception type. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2) {
    return PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2);
}
#endif
#define __Pyx_PyErr_ExceptionMatches2(err1, err2)  __Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2)
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
#ifdef PyExceptionInstance_Check
  #define __Pyx_PyBaseException_Check(obj) PyExceptionInstance_Check(obj)
#else
  #define __Pyx_PyBaseException_Check(obj) __Pyx_TypeCheck(obj, PyExc_BaseException)
#endif

/* GetRuntimeVersion.proto */
/* Before 3.11 the runtime version is probed once and cached in a static;
   from 3.11 on no init step is needed. */
#if __PYX_LIMITED_VERSION_HEX < 0x030b0000
static unsigned long __Pyx_cached_runtime_version = 0;
static void __Pyx_init_runtime_version(void);
#else
#define __Pyx_init_runtime_version()
#endif
static unsigned long __Pyx_get_runtime_version(void);

/* AddModuleRef.proto */
/* PyImport_AddModuleRef is used directly where available (3.13+, non
   free-threaded); otherwise a compatibility shim is compiled in. */
#if ((CYTHON_COMPILING_IN_CPYTHON_FREETHREADING ) ||\
     __PYX_LIMITED_VERSION_HEX < 0x030d0000)
  static PyObject *__Pyx_PyImport_AddModuleRef(const char *name);
#else
  #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name)
#endif

/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer);

/* DecompressString.proto */
static PyObject *__Pyx_DecompressString(const char *s, Py_ssize_t length, int algo);

/* MultiPhaseInitModuleState.proto */
#if CYTHON_PEP489_MULTI_PHASE_INIT && CYTHON_USE_MODULE_STATE
static PyObject *__Pyx_State_FindModule(void*);
static int __Pyx_State_AddModule(PyObject* module, void*);
static int __Pyx_State_RemoveModule(void*);
#elif CYTHON_USE_MODULE_STATE
#define __Pyx_State_FindModule PyState_FindModule
#define __Pyx_State_AddModule PyState_AddModule
#define __Pyx_State_RemoveModule PyState_RemoveModule
#endif

/* #### Code section: module_declarations ### */
/* CythonABIVersion.proto */
/* Builds the ABI identification string from the Cython version plus
   feature-flag suffixes, by string-literal concatenation. Suffix macros may
   be referenced before they are #defined (e.g. __PYX_AM_SEND_ABI_SUFFIX at
   the __PYX_LIMITED_ABI_SUFFIX definition): expansion only happens where
   CYTHON_ABI itself is used, after all definitions are in place.
   NOTE(review): in Limited-API builds the am_send suffix appears both via
   __PYX_LIMITED_ABI_SUFFIX and directly in CYTHON_ABI, so a non-empty value
   is concatenated twice — verify against upstream Cython before changing,
   as this string participates in cross-module ABI matching. */
#if CYTHON_COMPILING_IN_LIMITED_API
    #if CYTHON_METH_FASTCALL
        #define __PYX_FASTCALL_ABI_SUFFIX  "_fastcall"
    #else
        #define __PYX_FASTCALL_ABI_SUFFIX
    #endif
    #define __PYX_LIMITED_ABI_SUFFIX "limited" __PYX_FASTCALL_ABI_SUFFIX __PYX_AM_SEND_ABI_SUFFIX
#else
    #define __PYX_LIMITED_ABI_SUFFIX
#endif
#if __PYX_HAS_PY_AM_SEND == 1
    #define __PYX_AM_SEND_ABI_SUFFIX
#elif __PYX_HAS_PY_AM_SEND == 2
    #define __PYX_AM_SEND_ABI_SUFFIX "amsendbackport"
#else
    #define __PYX_AM_SEND_ABI_SUFFIX "noamsend"
#endif
#ifndef __PYX_MONITORING_ABI_SUFFIX
    #define __PYX_MONITORING_ABI_SUFFIX
#endif
#if CYTHON_USE_TP_FINALIZE
    #define __PYX_TP_FINALIZE_ABI_SUFFIX
#else
    #define __PYX_TP_FINALIZE_ABI_SUFFIX "nofinalize"
#endif
#if CYTHON_USE_FREELISTS || !defined(__Pyx_AsyncGen_USED)
    #define __PYX_FREELISTS_ABI_SUFFIX
#else
    #define __PYX_FREELISTS_ABI_SUFFIX "nofreelists"
#endif
#define CYTHON_ABI  __PYX_ABI_VERSION __PYX_LIMITED_ABI_SUFFIX __PYX_MONITORING_ABI_SUFFIX __PYX_TP_FINALIZE_ABI_SUFFIX __PYX_FREELISTS_ABI_SUFFIX __PYX_AM_SEND_ABI_SUFFIX
#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI
#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "."


/* Module declarations from "libc.stdint" */

/* Module declarations from "cuda.bindings._internal" */

/* Module declarations from "cuda.bindings._internal._nvml" */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlInit_v2)(void); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlInitWithFlags)(unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlShutdown)(void); /*proto*/
static char const *(*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlErrorString)(nvmlReturn_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetDriverVersion)(char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetNVMLVersion)(char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetCudaDriverVersion)(int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetCudaDriverVersion_v2)(int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetProcessName)(unsigned int, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetHicVersion)(unsigned int *, nvmlHwbcEntry_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetTopologyGpuSet)(unsigned int, unsigned int *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetDriverBranch)(nvmlSystemDriverBranchInfo_t *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetCount)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetHandleByIndex)(unsigned int, nvmlUnit_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetUnitInfo)(nvmlUnit_t, nvmlUnitInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetLedState)(nvmlUnit_t, nvmlLedState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetPsuInfo)(nvmlUnit_t, nvmlPSUInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetTemperature)(nvmlUnit_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetFanSpeedInfo)(nvmlUnit_t, nvmlUnitFanSpeeds_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetDevices)(nvmlUnit_t, unsigned int *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCount_v2)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAttributes_v2)(nvmlDevice_t, nvmlDeviceAttributes_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByIndex_v2)(unsigned int, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleBySerial)(char const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByUUID)(char const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByUUIDV)(nvmlUUID_t const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByPciBusId_v2)(char const *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetName)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBrand)(nvmlDevice_t, nvmlBrandType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetIndex)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSerial)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetModuleId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetC2cModeInfoV)(nvmlDevice_t, nvmlC2cModeInfo_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryAffinity)(nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCpuAffinityWithinScope)(nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCpuAffinity)(nvmlDevice_t, unsigned int, unsigned long *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetCpuAffinity)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearCpuAffinity)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumaNodeId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTopologyCommonAncestor)(nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTopologyNearestGpus)(nvmlDevice_t, nvmlGpuTopologyLevel_t, unsigned int *, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetP2PStatus)(nvmlDevice_t, nvmlDevice_t, nvmlGpuP2PCapsIndex_t, nvmlGpuP2PStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetUUID)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinorNumber)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBoardPartNumber)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomVersion)(nvmlDevice_t, nvmlInforomObject_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomImageVersion)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomConfigurationChecksum)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceValidateInforom)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetLastBBXFlushTime)(nvmlDevice_t, unsigned PY_LONG_LONG *, unsigned long *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDisplayMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDisplayActive)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPersistenceMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPciInfoExt)(nvmlDevice_t, nvmlPciInfoExt_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPciInfo_v3)(nvmlDevice_t, nvmlPciInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxPcieLinkGeneration)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuMaxPcieLinkGeneration)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxPcieLinkWidth)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrPcieLinkGeneration)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrPcieLinkWidth)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieThroughput)(nvmlDevice_t, nvmlPcieUtilCounter_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieReplayCounter)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClockInfo)(nvmlDevice_t, nvmlClockType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxClockInfo)(nvmlDevice_t, nvmlClockType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpcClkVfOffset)(nvmlDevice_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClock)(nvmlDevice_t, nvmlClockType_t, nvmlClockId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxCustomerBoostClock)(nvmlDevice_t, nvmlClockType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedMemoryClocks)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedGraphicsClocks)(nvmlDevice_t, unsigned int, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAutoBoostedClocksEnabled)(nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeed)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeed_v2)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeedRPM)(nvmlDevice_t, nvmlFanSpeedInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTargetFanSpeed)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinMaxFanSpeed)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanControlPolicy_v2)(nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumFans)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCoolerInfo)(nvmlDevice_t, nvmlCoolerInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTemperatureV)(nvmlDevice_t, nvmlTemperature_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTemperatureThreshold)(nvmlDevice_t, nvmlTemperatureThresholds_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMarginTemperature)(nvmlDevice_t, nvmlMarginTemperature_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetThermalSettings)(nvmlDevice_t, unsigned int, nvmlGpuThermalSettings_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPerformanceState)(nvmlDevice_t, nvmlPstates_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrentClocksEventReasons)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedClocksEventReasons)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerState)(nvmlDevice_t, nvmlPstates_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDynamicPstatesInfo)(nvmlDevice_t, nvmlGpuDynamicPstatesInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemClkVfOffset)(nvmlDevice_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinMaxClockOfPState)(nvmlDevice_t, nvmlClockType_t, nvmlPstates_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedPerformanceStates)(nvmlDevice_t, nvmlPstates_t *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpcClkMinMaxVfOffset)(nvmlDevice_t, int *, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemClkMinMaxVfOffset)(nvmlDevice_t, int *, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClockOffsets)(nvmlDevice_t, nvmlClockOffset_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetClockOffsets)(nvmlDevice_t, nvmlClockOffset_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPerformanceModes)(nvmlDevice_t, nvmlDevicePerfModes_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrentClockFreqs)(nvmlDevice_t, nvmlDeviceCurrentClockFreqs_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementLimit)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementLimitConstraints)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementDefaultLimit)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerUsage)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTotalEnergyConsumption)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEnforcedPowerLimit)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuOperationMode)(nvmlDevice_t, nvmlGpuOperationMode_t *, nvmlGpuOperationMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryInfo_v2)(nvmlDevice_t, nvmlMemory_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeMode)(nvmlDevice_t, nvmlComputeMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCudaComputeCapability)(nvmlDevice_t, int *, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDramEncryptionMode)(nvmlDevice_t, nvmlDramEncryptionInfo_t *, nvmlDramEncryptionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDramEncryptionMode)(nvmlDevice_t, nvmlDramEncryptionInfo_t const *); /*proto*/
/*
 * Cython-generated forward declarations of function pointers, one per NVML
 * API entry point used by this module. Each pointer mirrors the signature of
 * the corresponding nvml* function from the NVIDIA Management Library.
 * NOTE(review): these appear to be resolved at runtime by the companion
 * cuda.bindings._internal._nvml module (presumably via dlopen/dlsym or
 * GetProcAddress) rather than linked statically — confirm against that
 * module. Do not edit: identifiers are Cython name-mangled and must match
 * the definitions emitted elsewhere in this translation unit.
 */
/* --- ECC mode and memory error counters --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEccMode)(nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDefaultEccMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBoardId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMultiGpuBoard)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTotalEccErrors)(nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryErrorCounter)(nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, nvmlMemoryLocation_t, unsigned PY_LONG_LONG *); /*proto*/
/* --- Utilization rates and encoder/decoder/JPG/OFA/FBC statistics --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetUtilizationRates)(nvmlDevice_t, nvmlUtilization_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderCapacity)(nvmlDevice_t, nvmlEncoderType_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderStats)(nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderSessions)(nvmlDevice_t, unsigned int *, nvmlEncoderSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDecoderUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetJpgUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetOfaUtilization)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFBCStats)(nvmlDevice_t, nvmlFBCStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFBCSessions)(nvmlDevice_t, unsigned int *, nvmlFBCSessionInfo_t *); /*proto*/
/* --- Driver model, VBIOS, bridge chip, and running-process queries --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDriverModel_v2)(nvmlDevice_t, nvmlDriverModel_t *, nvmlDriverModel_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVbiosVersion)(nvmlDevice_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBridgeChipInfo)(nvmlDevice_t, nvmlBridgeChipHierarchy_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeRunningProcesses_v3)(nvmlDevice_t, unsigned int *, nvmlProcessInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMPSComputeRunningProcesses_v3)(nvmlDevice_t, unsigned int *, nvmlProcessInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRunningProcessDetailList)(nvmlDevice_t, nvmlProcessDetailList_t *); /*proto*/
/* --- Miscellaneous device queries (topology, sampling, PCIe, power source, fabric) --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceOnSameBoard)(nvmlDevice_t, nvmlDevice_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAPIRestriction)(nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSamples)(nvmlDevice_t, nvmlSamplingType_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlSample_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBAR1MemoryInfo)(nvmlDevice_t, nvmlBAR1Memory_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetIrqNum)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumGpuCores)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerSource)(nvmlDevice_t, nvmlPowerSource_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryBusWidth)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieLinkMaxSpeed)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieSpeed)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAdaptiveClockInfoStatus)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBusType)(nvmlDevice_t, nvmlBusType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuFabricInfoV)(nvmlDevice_t, nvmlGpuFabricInfoV_t *); /*proto*/
/* --- Confidential Computing (CC) state, attestation, and key rotation --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeCapabilities)(nvmlConfComputeSystemCaps_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeState)(nvmlConfComputeSystemState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeMemSizeInfo)(nvmlDevice_t, nvmlConfComputeMemSizeInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeGpusReadyState)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeProtectedMemoryUsage)(nvmlDevice_t, nvmlMemory_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeGpuCertificate)(nvmlDevice_t, nvmlConfComputeGpuCertificate_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeGpuAttestationReport)(nvmlDevice_t, nvmlConfComputeGpuAttestationReport_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeKeyRotationThresholdInfo)(nvmlConfComputeGetKeyRotationThresholdInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetConfComputeUnprotectedMemSize)(nvmlDevice_t, unsigned PY_LONG_LONG); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetConfComputeGpusReadyState)(unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetConfComputeKeyRotationThresholdInfo)(nvmlConfComputeSetKeyRotationThresholdInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeSettings)(nvmlSystemConfComputeSettings_t *); /*proto*/
/* --- GSP firmware and SRAM ECC status --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGspFirmwareVersion)(nvmlDevice_t, char *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGspFirmwareMode)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSramEccErrorStatus)(nvmlDevice_t, nvmlEccSramErrorStatus_t *); /*proto*/
/* --- Per-process accounting --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingMode)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingStats)(nvmlDevice_t, unsigned int, nvmlAccountingStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingPids)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingBufferSize)(nvmlDevice_t, unsigned int *); /*proto*/
/* --- Retired pages and row remapping --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPages)(nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPages_v2)(nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPagesPendingStatus)(nvmlDevice_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRemappedRows)(nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRowRemapperHistogram)(nvmlDevice_t, nvmlRowRemapperHistogramValues_t *); /*proto*/
/* --- Architecture, clock monitor, and process/platform utilization info --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetArchitecture)(nvmlDevice_t, nvmlDeviceArchitecture_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClkMonStatus)(nvmlDevice_t, nvmlClkMonStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetProcessUtilization)(nvmlDevice_t, nvmlProcessUtilizationSample_t *, unsigned int *, unsigned PY_LONG_LONG); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetProcessesUtilizationInfo)(nvmlDevice_t, nvmlProcessesUtilizationInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPlatformInfo)(nvmlDevice_t, nvmlPlatformInfo_t *); /*proto*/
/* --- Device/unit setters: LED, modes, clocks, fan, power, accounting --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitSetLedState)(nvmlUnit_t, nvmlLedColor_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPersistenceMode)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetComputeMode)(nvmlDevice_t, nvmlComputeMode_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetEccMode)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearEccErrorCounts)(nvmlDevice_t, nvmlEccCounterType_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDriverModel)(nvmlDevice_t, nvmlDriverModel_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetGpuLockedClocks)(nvmlDevice_t, unsigned int, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetGpuLockedClocks)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetMemoryLockedClocks)(nvmlDevice_t, unsigned int, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetMemoryLockedClocks)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAutoBoostedClocksEnabled)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDefaultAutoBoostedClocksEnabled)(nvmlDevice_t, nvmlEnableState_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDefaultFanSpeed_v2)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetFanControlPolicy)(nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetTemperatureThreshold)(nvmlDevice_t, nvmlTemperatureThresholds_t, int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerManagementLimit)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetGpuOperationMode)(nvmlDevice_t, nvmlGpuOperationMode_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAPIRestriction)(nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetFanSpeed_v2)(nvmlDevice_t, unsigned int, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAccountingMode)(nvmlDevice_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearAccountingPids)(nvmlDevice_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerManagementLimit_v2)(nvmlDevice_t, nvmlPowerValue_v2_t *); /*proto*/
/* --- NvLink state, counters, and bandwidth modes --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkState)(nvmlDevice_t, unsigned int, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkVersion)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkCapability)(nvmlDevice_t, unsigned int, nvmlNvLinkCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkRemotePciInfo_v2)(nvmlDevice_t, unsigned int, nvmlPciInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkErrorCounter)(nvmlDevice_t, unsigned int, nvmlNvLinkErrorCounter_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetNvLinkErrorCounters)(nvmlDevice_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkRemoteDeviceType)(nvmlDevice_t, unsigned int, nvmlIntNvLinkDeviceType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetNvLinkDeviceLowPowerThreshold)(nvmlDevice_t, nvmlNvLinkPowerThres_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetNvlinkBwMode)(unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetNvlinkBwMode)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvlinkSupportedBwModes)(nvmlDevice_t, nvmlNvlinkSupportedBwModes_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvlinkBwMode)(nvmlDevice_t, nvmlNvlinkGetBwMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetNvlinkBwMode)(nvmlDevice_t, nvmlNvlinkSetBwMode_t *); /*proto*/
/* --- Device and system event sets --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetCreate)(nvmlEventSet_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceRegisterEvents)(nvmlDevice_t, unsigned PY_LONG_LONG, nvmlEventSet_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedEventTypes)(nvmlDevice_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetWait_v2)(nvmlEventSet_t, nvmlEventData_t *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetFree)(nvmlEventSet_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetCreate)(nvmlSystemEventSetCreateRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetFree)(nvmlSystemEventSetFreeRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemRegisterEvents)(nvmlSystemRegisterEventRequest_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetWait)(nvmlSystemEventSetWaitRequest_t *); /*proto*/
/* --- Drain state and out-of-band GPU removal/discovery --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceModifyDrainState)(nvmlPciInfo_t *, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceQueryDrainState)(nvmlPciInfo_t *, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceRemoveGpu_v2)(nvmlPciInfo_t *, nvmlDetachGpuState_t, nvmlPcieLinkState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceDiscoverGpus)(nvmlPciInfo_t *); /*proto*/
/* --- Field value queries --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFieldValues)(nvmlDevice_t, int, nvmlFieldValue_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearFieldValues)(nvmlDevice_t, int, nvmlFieldValue_t *); /*proto*/
/* --- GPU virtualization and vGPU management --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVirtualizationMode)(nvmlDevice_t, nvmlGpuVirtualizationMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHostVgpuMode)(nvmlDevice_t, nvmlHostVgpuMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVirtualizationMode)(nvmlDevice_t, nvmlGpuVirtualizationMode_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuHeterogeneousMode)(nvmlDevice_t, nvmlVgpuHeterogeneousMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuHeterogeneousMode)(nvmlDevice_t, nvmlVgpuHeterogeneousMode_t const *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetPlacementId)(nvmlVgpuInstance_t, nvmlVgpuPlacementId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuTypeSupportedPlacements)(nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuTypeCreatablePlacements)(nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetGspHeapSize)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFbReservation)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetRuntimeStateSize)(nvmlVgpuInstance_t, nvmlVgpuRuntimeState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuCapabilities)(nvmlDevice_t, nvmlDeviceVgpuCapability_t, nvmlEnableState_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGridLicensableFeatures_v4)(nvmlDevice_t, nvmlGridLicensableFeatures_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuDriverCapabilities)(nvmlVgpuDriverCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuCapabilities)(nvmlDevice_t, nvmlDeviceVgpuCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedVgpus)(nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCreatableVgpus)(nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetClass)(nvmlVgpuTypeId_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetName)(nvmlVgpuTypeId_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetGpuInstanceProfileId)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetDeviceID)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFramebufferSize)(nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetNumDisplayHeads)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetResolution)(nvmlVgpuTypeId_t, unsigned int, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetLicense)(nvmlVgpuTypeId_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFrameRateLimit)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstances)(nvmlDevice_t, nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstancesPerVm)(nvmlVgpuTypeId_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetBAR1Info)(nvmlVgpuTypeId_t, nvmlVgpuTypeBar1Info_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetActiveVgpus)(nvmlDevice_t, unsigned int *, nvmlVgpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetVmID)(nvmlVgpuInstance_t, char *, unsigned int, nvmlVgpuVmIdType_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetUUID)(nvmlVgpuInstance_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetVmDriverVersion)(nvmlVgpuInstance_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFbUsage)(nvmlVgpuInstance_t, unsigned PY_LONG_LONG *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetLicenseStatus)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetType)(nvmlVgpuInstance_t, nvmlVgpuTypeId_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFrameRateLimit)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEccMode)(nvmlVgpuInstance_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderCapacity)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceSetEncoderCapacity)(nvmlVgpuInstance_t, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderStats)(nvmlVgpuInstance_t, unsigned int *, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderSessions)(nvmlVgpuInstance_t, unsigned int *, nvmlEncoderSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFBCStats)(nvmlVgpuInstance_t, nvmlFBCStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFBCSessions)(nvmlVgpuInstance_t, unsigned int *, nvmlFBCSessionInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetGpuInstanceId)(nvmlVgpuInstance_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetGpuPciId)(nvmlVgpuInstance_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetCapabilities)(nvmlVgpuTypeId_t, nvmlVgpuCapability_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetMdevUUID)(nvmlVgpuInstance_t, char *, unsigned int); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetCreatableVgpus)(nvmlGpuInstance_t, nvmlVgpuTypeIdInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstancesPerGpuInstance)(nvmlVgpuTypeMaxInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetActiveVgpus)(nvmlGpuInstance_t, nvmlActiveVgpuInstanceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceSetVgpuSchedulerState)(nvmlGpuInstance_t, nvmlVgpuSchedulerState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuSchedulerState)(nvmlGpuInstance_t, nvmlVgpuSchedulerStateInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuSchedulerLog)(nvmlGpuInstance_t, nvmlVgpuSchedulerLogInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuTypeCreatablePlacements)(nvmlGpuInstance_t, nvmlVgpuCreatablePlacementInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuHeterogeneousMode)(nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceSetVgpuHeterogeneousMode)(nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t const *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetMetadata)(nvmlVgpuInstance_t, nvmlVgpuMetadata_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuMetadata)(nvmlDevice_t, nvmlVgpuPgpuMetadata_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuCompatibility)(nvmlVgpuMetadata_t *, nvmlVgpuPgpuMetadata_t *, nvmlVgpuPgpuCompatibility_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPgpuMetadataString)(nvmlDevice_t, char *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerLog)(nvmlDevice_t, nvmlVgpuSchedulerLog_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerState)(nvmlDevice_t, nvmlVgpuSchedulerGetState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerCapabilities)(nvmlDevice_t, nvmlVgpuSchedulerCapabilities_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuSchedulerState)(nvmlDevice_t, nvmlVgpuSchedulerSetState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuVersion)(nvmlVgpuVersion_t *, nvmlVgpuVersion_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSetVgpuVersion)(nvmlVgpuVersion_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuUtilization)(nvmlDevice_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlVgpuInstanceUtilizationSample_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuInstancesUtilizationInfo)(nvmlDevice_t, nvmlVgpuInstancesUtilizationInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuProcessUtilization)(nvmlDevice_t, unsigned PY_LONG_LONG, unsigned int *, nvmlVgpuProcessUtilizationSample_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuProcessesUtilizationInfo)(nvmlDevice_t, nvmlVgpuProcessesUtilizationInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingMode)(nvmlVgpuInstance_t, nvmlEnableState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingPids)(nvmlVgpuInstance_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingStats)(nvmlVgpuInstance_t, unsigned int, nvmlAccountingStats_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceClearAccountingPids)(nvmlVgpuInstance_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetLicenseInfo_v2)(nvmlVgpuInstance_t, nvmlVgpuLicenseInfo_t *); /*proto*/
/* --- Excluded (blacklisted) device enumeration --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetExcludedDeviceCount)(unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetExcludedDeviceInfoByIndex)(unsigned int, nvmlExcludedDeviceInfo_t *); /*proto*/
/* --- Multi-Instance GPU (MIG): GPU instances and compute instances --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetMigMode)(nvmlDevice_t, unsigned int, nvmlReturn_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMigMode)(nvmlDevice_t, unsigned int *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceProfileInfoV)(nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstancePossiblePlacements_v2)(nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceRemainingCapacity)(nvmlDevice_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceCreateGpuInstance)(nvmlDevice_t, unsigned int, nvmlGpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceCreateGpuInstanceWithPlacement)(nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t const *, nvmlGpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceDestroy)(nvmlGpuInstance_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstances)(nvmlDevice_t, unsigned int, nvmlGpuInstance_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceById)(nvmlDevice_t, unsigned int, nvmlGpuInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetInfo)(nvmlGpuInstance_t, nvmlGpuInstanceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceProfileInfoV)(nvmlGpuInstance_t, unsigned int, unsigned int, nvmlComputeInstanceProfileInfo_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceRemainingCapacity)(nvmlGpuInstance_t, unsigned int, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstancePossiblePlacements)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceCreateComputeInstance)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceCreateComputeInstanceWithPlacement)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t const *, nvmlComputeInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlComputeInstanceDestroy)(nvmlComputeInstance_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstances)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceById)(nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlComputeInstanceGetInfo_v2)(nvmlComputeInstance_t, nvmlComputeInstanceInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceIsMigDeviceHandle)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeInstanceId)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxMigDeviceCount)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMigDeviceHandleByIndex)(nvmlDevice_t, unsigned int, nvmlDevice_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDeviceHandleFromMigDeviceHandle)(nvmlDevice_t, nvmlDevice_t *); /*proto*/
/* --- GPU Performance Monitoring (GPM) sampling --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmSampleGet)(nvmlDevice_t, nvmlGpmSample_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmMigSampleGet)(nvmlDevice_t, unsigned int, nvmlGpmSample_t); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmQueryDeviceSupport)(nvmlDevice_t, nvmlGpmSupport_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmQueryIfStreamingEnabled)(nvmlDevice_t, unsigned int *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmSetStreamingEnabled)(nvmlDevice_t, unsigned int); /*proto*/
/* --- Capabilities, workload power profiles, power smoothing, and newer device queries --- */
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCapabilities)(nvmlDevice_t, nvmlDeviceCapabilities_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles)(nvmlDevice_t, nvmlWorkloadPowerProfileRequestedProfiles_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingActivatePresetProfile)(nvmlDevice_t, nvmlPowerSmoothingProfile_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingUpdatePresetProfileParam)(nvmlDevice_t, nvmlPowerSmoothingProfile_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingSetState)(nvmlDevice_t, nvmlPowerSmoothingState_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAddressingMode)(nvmlDevice_t, nvmlDeviceAddressingMode_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRepairStatus)(nvmlDevice_t, nvmlRepairStatus_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerMizerMode_v1)(nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerMizerMode_v1)(nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPdi)(nvmlDevice_t, nvmlPdi_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetHostname_v1)(nvmlDevice_t, nvmlHostname_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHostname_v1)(nvmlDevice_t, nvmlHostname_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkInfo)(nvmlDevice_t, nvmlNvLinkInfo_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceReadWritePRM_v1)(nvmlDevice_t, nvmlPRMTLV_v1_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceProfileInfoByIdV)(nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *); /*proto*/
static nvmlReturn_t (*__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts)(nvmlDevice_t, nvmlEccSramUniqueUncorrectedErrorCounts_t *); /*proto*/

/* Module declarations from "cuda.bindings.cy_nvml" */
/* #### Code section: typeinfo ### */
/* #### Code section: before_global_var ### */
#define __Pyx_MODULE_NAME "cuda.bindings.cy_nvml"
extern int __pyx_module_is_main_cuda__bindings__cy_nvml;
int __pyx_module_is_main_cuda__bindings__cy_nvml = 0;

/* Implementation of "cuda.bindings.cy_nvml" */
/* #### Code section: global_var ### */
/* #### Code section: string_decls ### */
/* #### Code section: decls ### */
/* #### Code section: late_includes ### */
/* #### Code section: module_state ### */
/* SmallCodeConfig */
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
    #define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
    #define CYTHON_SMALL_CODE __attribute__((cold))
#else
    #define CYTHON_SMALL_CODE
#endif
#endif

/* Per-module state for this extension module (PEP 3121/489 style).
 * Holds the references the generated code needs at runtime; accessed
 * through the __pyx_mstate_global macro defined below.  Cleared by
 * __pyx_m_clear and reported to the GC by __pyx_m_traverse. */
typedef struct {
  PyObject *__pyx_d;                /* this module's __dict__ */
  PyObject *__pyx_b;                /* the builtins module */
  PyObject *__pyx_cython_runtime;   /* the cython_runtime module */
  PyObject *__pyx_empty_tuple;      /* canonical empty tuple constant */
  PyObject *__pyx_empty_bytes;      /* canonical empty bytes constant */
  PyObject *__pyx_empty_unicode;    /* canonical empty str constant */
  PyObject *__pyx_string_tab[11];   /* interned strings; indices named in
                                     * the constant_name_defines section */
/* #### Code section: module_state_contents ### */
/* CodeObjectCache.module_state_decls */
struct __Pyx_CodeObjectCache __pyx_code_cache;

/* #### Code section: module_state_end ### */
} __pyx_mstatetype;

#if CYTHON_USE_MODULE_STATE
#ifdef __cplusplus
namespace {
extern struct PyModuleDef __pyx_moduledef;
} /* anonymous namespace */
#else
static struct PyModuleDef __pyx_moduledef;
#endif

#define __pyx_mstate_global (__Pyx_PyModule_GetState(__Pyx_State_FindModule(&__pyx_moduledef)))

#define __pyx_m (__Pyx_State_FindModule(&__pyx_moduledef))
#else
static __pyx_mstatetype __pyx_mstate_global_static =
#ifdef __cplusplus
    {};
#else
    {0};
#endif
static __pyx_mstatetype * const __pyx_mstate_global = &__pyx_mstate_global_static;
#endif
/* #### Code section: constant_name_defines ### */
#define __pyx_kp_u_ __pyx_string_tab[0]
#define __pyx_n_u_cline_in_traceback __pyx_string_tab[1]
#define __pyx_n_u_main __pyx_string_tab[2]
#define __pyx_n_u_module __pyx_string_tab[3]
#define __pyx_n_u_name __pyx_string_tab[4]
#define __pyx_n_u_pyx_capi __pyx_string_tab[5]
#define __pyx_n_u_qualname __pyx_string_tab[6]
#define __pyx_n_u_setdefault __pyx_string_tab[7]
#define __pyx_n_u_test __pyx_string_tab[8]
#define __pyx_kp_b_char_const_nvmlReturn_t_nvmlRetu __pyx_string_tab[9]
#define __pyx_kp_b_char_const_nvmlReturn_t_nvmlRetu_2 __pyx_string_tab[10]
/* #### Code section: module_state_clear ### */
#if CYTHON_USE_MODULE_STATE
/* m_clear slot for the module object: releases every owned reference in
 * the module state so the cyclic GC can break reference cycles involving
 * the module.  Returns 0 (success) even when the state is absent. */
static CYTHON_SMALL_CODE int __pyx_m_clear(PyObject *m) {
  __pyx_mstatetype *clear_module_state = __Pyx_PyModule_GetState(m);
  if (!clear_module_state) return 0;
  Py_CLEAR(clear_module_state->__pyx_d);
  Py_CLEAR(clear_module_state->__pyx_b);
  Py_CLEAR(clear_module_state->__pyx_cython_runtime);
  Py_CLEAR(clear_module_state->__pyx_empty_tuple);
  Py_CLEAR(clear_module_state->__pyx_empty_bytes);
  Py_CLEAR(clear_module_state->__pyx_empty_unicode);
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  /* Detach this module from Cython's interpreter-local module registry
   * (only relevant under multi-phase initialization). */
  __Pyx_State_RemoveModule(NULL);
  #endif
  /* Release all interned strings held in the string table. */
  for (int i=0; i<11; ++i) { Py_CLEAR(clear_module_state->__pyx_string_tab[i]); }
/* #### Code section: module_state_clear_contents ### */
/* #### Code section: module_state_clear_end ### */
return 0;
}
#endif
/* #### Code section: module_state_traverse ### */
#if CYTHON_USE_MODULE_STATE
/* m_traverse slot for the module object: reports every PyObject* held in
 * the module state to the cycle-detecting GC via `visit`.
 * __Pyx_VISIT_CONST is the variant used for constant objects (empty
 * containers, interned strings).  Returns 0 on success. */
static CYTHON_SMALL_CODE int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) {
  __pyx_mstatetype *traverse_module_state = __Pyx_PyModule_GetState(m);
  if (!traverse_module_state) return 0;
  Py_VISIT(traverse_module_state->__pyx_d);
  Py_VISIT(traverse_module_state->__pyx_b);
  Py_VISIT(traverse_module_state->__pyx_cython_runtime);
  __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_tuple);
  __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_bytes);
  __Pyx_VISIT_CONST(traverse_module_state->__pyx_empty_unicode);
  for (int i=0; i<11; ++i) { __Pyx_VISIT_CONST(traverse_module_state->__pyx_string_tab[i]); }
/* #### Code section: module_state_traverse_contents ### */
/* #### Code section: module_state_traverse_end ### */
return 0;
}
#endif
/* #### Code section: module_code ### */

/* "cuda/bindings/cy_nvml.pyx":14
 * ###############################################################################
 * 
 * cdef nvmlReturn_t nvmlInit_v2() except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlInit_v2()
 * 
*/

/* nogil trampoline for NVML nvmlInit_v2().  Delegates to the function
 * pointer resolved at runtime by cuda.bindings._internal._nvml.  If the
 * call returns the loading-error sentinel while a Python exception is
 * pending (checked via __Pyx_ErrOccurredWithGIL), the error path briefly
 * re-acquires the GIL to record a traceback and returns the sentinel —
 * this implements the Cython `except?` declared on the .pyx signature. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlInit_v2(void) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":15
 * 
 * cdef nvmlReturn_t nvmlInit_v2() except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlInit_v2()             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* forward to the dynamically resolved implementation; __PYX_ERR jumps
   * to __pyx_L1_error on a real failure */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlInit_v2(); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 15, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":14
 * ###############################################################################
 * 
 * cdef nvmlReturn_t nvmlInit_v2() except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlInit_v2()
 * 
 */

  /* function exit code */
  /* error path: attach a traceback entry under the GIL, then return the
   * sentinel so the caller can propagate the pending exception */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlInit_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":18
 * 
 * 
 * cdef nvmlReturn_t nvmlInitWithFlags(unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlInitWithFlags(flags)
 * 
*/

/* nogil trampoline for NVML nvmlInitWithFlags(flags).  Delegates to the
 * function pointer resolved by cuda.bindings._internal._nvml; on the
 * loading-error sentinel with a pending Python exception, re-acquires the
 * GIL to record a traceback and returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlInitWithFlags(unsigned int __pyx_v_flags) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":19
 * 
 * cdef nvmlReturn_t nvmlInitWithFlags(unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlInitWithFlags(flags)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlInitWithFlags(__pyx_v_flags); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 19, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":18
 * 
 * 
 * cdef nvmlReturn_t nvmlInitWithFlags(unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlInitWithFlags(flags)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlInitWithFlags", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":22
 * 
 * 
 * cdef nvmlReturn_t nvmlShutdown() except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlShutdown()
 * 
*/

/* nogil trampoline for NVML nvmlShutdown().  Delegates to the function
 * pointer resolved by cuda.bindings._internal._nvml; on the loading-error
 * sentinel with a pending Python exception, re-acquires the GIL to record
 * a traceback and returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlShutdown(void) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":23
 * 
 * cdef nvmlReturn_t nvmlShutdown() except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlShutdown()             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlShutdown(); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 23, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":22
 * 
 * 
 * cdef nvmlReturn_t nvmlShutdown() except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlShutdown()
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlShutdown", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":26
 * 
 * 
 * cdef const char* nvmlErrorString(nvmlReturn_t result) except?NULL nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlErrorString(result)
 * 
*/

/* nogil trampoline for NVML nvmlErrorString(result) -> const char*.
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml.  Here the error sentinel is NULL
 * (Cython `except?NULL`): a NULL return combined with a pending Python
 * exception triggers the error path, which takes the GIL, records a
 * traceback, and returns NULL to the caller. */
static char const *__pyx_f_4cuda_8bindings_7cy_nvml_nvmlErrorString(nvmlReturn_t __pyx_v_result) {
  char const *__pyx_r;
  char const *__pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":27
 * 
 * cdef const char* nvmlErrorString(nvmlReturn_t result) except?NULL nogil:
 *     return _nvml._nvmlErrorString(result)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlErrorString(__pyx_v_result); if (unlikely(__pyx_t_1 == ((void *)NULL) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 27, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":26
 * 
 * 
 * cdef const char* nvmlErrorString(nvmlReturn_t result) except?NULL nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlErrorString(result)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return NULL */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlErrorString", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":30
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetDriverVersion(char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetDriverVersion(version, length)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetDriverVersion(version, length).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverVersion(char *__pyx_v_version, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":31
 * 
 * cdef nvmlReturn_t nvmlSystemGetDriverVersion(char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetDriverVersion(version, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetDriverVersion(__pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 31, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":30
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetDriverVersion(char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetDriverVersion(version, length)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetDriverVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":34
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetNVMLVersion(char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetNVMLVersion(version, length)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetNVMLVersion(version, length).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNVMLVersion(char *__pyx_v_version, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":35
 * 
 * cdef nvmlReturn_t nvmlSystemGetNVMLVersion(char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetNVMLVersion(version, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetNVMLVersion(__pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 35, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":34
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetNVMLVersion(char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetNVMLVersion(version, length)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetNVMLVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":38
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetCudaDriverVersion(int* cudaDriverVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetCudaDriverVersion(cudaDriverVersion)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetCudaDriverVersion(out ptr).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion(int *__pyx_v_cudaDriverVersion) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":39
 * 
 * cdef nvmlReturn_t nvmlSystemGetCudaDriverVersion(int* cudaDriverVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetCudaDriverVersion(cudaDriverVersion)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetCudaDriverVersion(__pyx_v_cudaDriverVersion); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 39, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":38
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetCudaDriverVersion(int* cudaDriverVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetCudaDriverVersion(cudaDriverVersion)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetCudaDriverVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":42
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetCudaDriverVersion_v2(int* cudaDriverVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetCudaDriverVersion_v2(cudaDriverVersion)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetCudaDriverVersion_v2(out ptr).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion_v2(int *__pyx_v_cudaDriverVersion) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":43
 * 
 * cdef nvmlReturn_t nvmlSystemGetCudaDriverVersion_v2(int* cudaDriverVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetCudaDriverVersion_v2(cudaDriverVersion)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetCudaDriverVersion_v2(__pyx_v_cudaDriverVersion); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 43, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":42
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetCudaDriverVersion_v2(int* cudaDriverVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetCudaDriverVersion_v2(cudaDriverVersion)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetCudaDriverVersion_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":46
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetProcessName(unsigned int pid, char* name, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetProcessName(pid, name, length)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetProcessName(pid, name, length).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetProcessName(unsigned int __pyx_v_pid, char *__pyx_v_name, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":47
 * 
 * cdef nvmlReturn_t nvmlSystemGetProcessName(unsigned int pid, char* name, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetProcessName(pid, name, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetProcessName(__pyx_v_pid, __pyx_v_name, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 47, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":46
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetProcessName(unsigned int pid, char* name, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetProcessName(pid, name, length)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetProcessName", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":50
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetHicVersion(unsigned int* hwbcCount, nvmlHwbcEntry_t* hwbcEntries) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetHicVersion(hwbcCount, hwbcEntries)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetHicVersion(count, entries).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetHicVersion(unsigned int *__pyx_v_hwbcCount, nvmlHwbcEntry_t *__pyx_v_hwbcEntries) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":51
 * 
 * cdef nvmlReturn_t nvmlSystemGetHicVersion(unsigned int* hwbcCount, nvmlHwbcEntry_t* hwbcEntries) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetHicVersion(hwbcCount, hwbcEntries)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetHicVersion(__pyx_v_hwbcCount, __pyx_v_hwbcEntries); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 51, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":50
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetHicVersion(unsigned int* hwbcCount, nvmlHwbcEntry_t* hwbcEntries) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetHicVersion(hwbcCount, hwbcEntries)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetHicVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":54
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int* count, nvmlDevice_t* deviceArray) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetTopologyGpuSet(cpuNumber, count, deviceArray)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetTopologyGpuSet(cpu, count, arr).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetTopologyGpuSet(unsigned int __pyx_v_cpuNumber, unsigned int *__pyx_v_count, nvmlDevice_t *__pyx_v_deviceArray) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":55
 * 
 * cdef nvmlReturn_t nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int* count, nvmlDevice_t* deviceArray) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetTopologyGpuSet(cpuNumber, count, deviceArray)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetTopologyGpuSet(__pyx_v_cpuNumber, __pyx_v_count, __pyx_v_deviceArray); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 55, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":54
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int* count, nvmlDevice_t* deviceArray) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetTopologyGpuSet(cpuNumber, count, deviceArray)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetTopologyGpuSet", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":58
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetDriverBranch(nvmlSystemDriverBranchInfo_t* branchInfo, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetDriverBranch(branchInfo, length)
 * 
*/

/* nogil trampoline for NVML nvmlSystemGetDriverBranch(info, length).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverBranch(nvmlSystemDriverBranchInfo_t *__pyx_v_branchInfo, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":59
 * 
 * cdef nvmlReturn_t nvmlSystemGetDriverBranch(nvmlSystemDriverBranchInfo_t* branchInfo, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetDriverBranch(branchInfo, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetDriverBranch(__pyx_v_branchInfo, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 59, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":58
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetDriverBranch(nvmlSystemDriverBranchInfo_t* branchInfo, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetDriverBranch(branchInfo, length)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetDriverBranch", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":62
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetCount(unsigned int* unitCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetCount(unitCount)
 * 
*/

/* nogil trampoline for NVML nvmlUnitGetCount(out ptr).  Delegates to the
 * function pointer resolved by cuda.bindings._internal._nvml; on the
 * loading-error sentinel with a pending Python exception, re-acquires the
 * GIL to record a traceback and returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetCount(unsigned int *__pyx_v_unitCount) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":63
 * 
 * cdef nvmlReturn_t nvmlUnitGetCount(unsigned int* unitCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlUnitGetCount(unitCount)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetCount(__pyx_v_unitCount); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 63, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":62
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetCount(unsigned int* unitCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetCount(unitCount)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetCount", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":66
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t* unit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetHandleByIndex(index, unit)
 * 
*/

/* nogil trampoline for NVML nvmlUnitGetHandleByIndex(index, out unit).
 * Delegates to the function pointer resolved by
 * cuda.bindings._internal._nvml; on the loading-error sentinel with a
 * pending Python exception, re-acquires the GIL to record a traceback and
 * returns the sentinel (Cython `except?`). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetHandleByIndex(unsigned int __pyx_v_index, nvmlUnit_t *__pyx_v_unit) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":67
 * 
 * cdef nvmlReturn_t nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t* unit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlUnitGetHandleByIndex(index, unit)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetHandleByIndex(__pyx_v_index, __pyx_v_unit); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 67, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":66
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t* unit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetHandleByIndex(index, unit)
 * 
 */

  /* function exit code */
  /* error path: attach a traceback under the GIL, then return sentinel */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetHandleByIndex", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":70
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetUnitInfo(unit, info)
 * 
*/

/* Thin nogil forwarder for nvmlUnitGetUnitInfo: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetUnitInfo(nvmlUnit_t __pyx_v_unit, nvmlUnitInfo_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetUnitInfo(__pyx_v_unit, __pyx_v_info);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 71, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetUnitInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":74
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetLedState(unit, state)
 * 
*/

/* Thin nogil forwarder for nvmlUnitGetLedState: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetLedState(nvmlUnit_t __pyx_v_unit, nvmlLedState_t *__pyx_v_state) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetLedState(__pyx_v_unit, __pyx_v_state);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 75, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetLedState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":78
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t* psu) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetPsuInfo(unit, psu)
 * 
*/

/* Thin nogil forwarder for nvmlUnitGetPsuInfo: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetPsuInfo(nvmlUnit_t __pyx_v_unit, nvmlPSUInfo_t *__pyx_v_psu) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetPsuInfo(__pyx_v_unit, __pyx_v_psu);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 79, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetPsuInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":82
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int* temp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetTemperature(unit, type, temp)
 * 
*/

/* Thin nogil forwarder for nvmlUnitGetTemperature: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetTemperature(nvmlUnit_t __pyx_v_unit, unsigned int __pyx_v_type, unsigned int *__pyx_v_temp) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetTemperature(__pyx_v_unit, __pyx_v_type, __pyx_v_temp);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 83, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetTemperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":86
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t* fanSpeeds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetFanSpeedInfo(unit, fanSpeeds)
 * 
*/

/* Thin nogil forwarder for nvmlUnitGetFanSpeedInfo: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetFanSpeedInfo(nvmlUnit_t __pyx_v_unit, nvmlUnitFanSpeeds_t *__pyx_v_fanSpeeds) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetFanSpeedInfo(__pyx_v_unit, __pyx_v_fanSpeeds);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 87, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetFanSpeedInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":90
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int* deviceCount, nvmlDevice_t* devices) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitGetDevices(unit, deviceCount, devices)
 * 
*/

/* Thin nogil forwarder for nvmlUnitGetDevices: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetDevices(nvmlUnit_t __pyx_v_unit, unsigned int *__pyx_v_deviceCount, nvmlDevice_t *__pyx_v_devices) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetDevices(__pyx_v_unit, __pyx_v_deviceCount, __pyx_v_devices);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 91, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitGetDevices", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":94
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCount_v2(unsigned int* deviceCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCount_v2(deviceCount)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetCount_v2: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCount_v2(unsigned int *__pyx_v_deviceCount) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCount_v2(__pyx_v_deviceCount);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 95, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCount_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":98
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAttributes_v2(nvmlDevice_t device, nvmlDeviceAttributes_t* attributes) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAttributes_v2(device, attributes)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetAttributes_v2: delegates to the
 * internal _nvml binding; on a sentinel loading-error return with a pending
 * Python exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAttributes_v2(nvmlDevice_t __pyx_v_device, nvmlDeviceAttributes_t *__pyx_v_attributes) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAttributes_v2(__pyx_v_device, __pyx_v_attributes);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 99, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAttributes_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":102
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHandleByIndex_v2(unsigned int index, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHandleByIndex_v2(index, device)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetHandleByIndex_v2: delegates to the
 * internal _nvml binding; on a sentinel loading-error return with a pending
 * Python exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByIndex_v2(unsigned int __pyx_v_index, nvmlDevice_t *__pyx_v_device) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByIndex_v2(__pyx_v_index, __pyx_v_device);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 103, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHandleByIndex_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":106
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHandleBySerial(const char* serial, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHandleBySerial(serial, device)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetHandleBySerial: delegates to the
 * internal _nvml binding; on a sentinel loading-error return with a pending
 * Python exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleBySerial(char const *__pyx_v_serial, nvmlDevice_t *__pyx_v_device) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleBySerial(__pyx_v_serial, __pyx_v_device);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 107, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHandleBySerial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":110
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHandleByUUID(const char* uuid, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHandleByUUID(uuid, device)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetHandleByUUID: delegates to the
 * internal _nvml binding; on a sentinel loading-error return with a pending
 * Python exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUID(char const *__pyx_v_uuid, nvmlDevice_t *__pyx_v_device) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByUUID(__pyx_v_uuid, __pyx_v_device);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 111, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHandleByUUID", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":114
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHandleByUUIDV(const nvmlUUID_t* uuid, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHandleByUUIDV(uuid, device)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetHandleByUUIDV: delegates to the
 * internal _nvml binding; on a sentinel loading-error return with a pending
 * Python exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUIDV(nvmlUUID_t const *__pyx_v_uuid, nvmlDevice_t *__pyx_v_device) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByUUIDV(__pyx_v_uuid, __pyx_v_device);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 115, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHandleByUUIDV", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":118
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHandleByPciBusId_v2(const char* pciBusId, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHandleByPciBusId_v2(pciBusId, device)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetHandleByPciBusId_v2: delegates to the
 * internal _nvml binding; on a sentinel loading-error return with a pending
 * Python exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByPciBusId_v2(char const *__pyx_v_pciBusId, nvmlDevice_t *__pyx_v_device) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByPciBusId_v2(__pyx_v_pciBusId, __pyx_v_device);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 119, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHandleByPciBusId_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":122
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetName(nvmlDevice_t device, char* name, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetName(device, name, length)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetName: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetName(nvmlDevice_t __pyx_v_device, char *__pyx_v_name, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetName(__pyx_v_device, __pyx_v_name, __pyx_v_length);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 123, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetName", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":126
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t* type) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBrand(device, type)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetBrand: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBrand(nvmlDevice_t __pyx_v_device, nvmlBrandType_t *__pyx_v_type) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBrand(__pyx_v_device, __pyx_v_type);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 127, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetBrand", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":130
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int* index) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetIndex(device, index)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetIndex: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIndex(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_index) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetIndex(__pyx_v_device, __pyx_v_index);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 131, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetIndex", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":134
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSerial(nvmlDevice_t device, char* serial, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSerial(device, serial, length)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetSerial: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSerial(nvmlDevice_t __pyx_v_device, char *__pyx_v_serial, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSerial(__pyx_v_device, __pyx_v_serial, __pyx_v_length);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 135, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSerial", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":138
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetModuleId(nvmlDevice_t device, unsigned int* moduleId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetModuleId(device, moduleId)
 * 
*/

/* Thin nogil forwarder for nvmlDeviceGetModuleId: delegates to the internal
 * _nvml binding; on a sentinel loading-error return with a pending Python
 * exception, records a traceback under the GIL and returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetModuleId(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_moduleId) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* Forward the call; __PYX_ERR jumps to the error label when needed. */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetModuleId(__pyx_v_device, __pyx_v_moduleId);
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 139, __pyx_L1_error)
  goto __pyx_L0;

  /* Error path: acquire the GIL, record the traceback, surface the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetModuleId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":142
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetC2cModeInfoV(nvmlDevice_t device, nvmlC2cModeInfo_v1_t* c2cModeInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetC2cModeInfoV(device, c2cModeInfo)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetC2cModeInfoV (cy_nvml.pyx:142):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is ambiguous, so the error path is
 * taken only when the sentinel is returned AND a Python exception is
 * pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetC2cModeInfoV(nvmlDevice_t __pyx_v_device, nvmlC2cModeInfo_v1_t *__pyx_v_c2cModeInfo) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":143
 * 
 * cdef nvmlReturn_t nvmlDeviceGetC2cModeInfoV(nvmlDevice_t device, nvmlC2cModeInfo_v1_t* c2cModeInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetC2cModeInfoV(device, c2cModeInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetC2cModeInfoV(__pyx_v_device, __pyx_v_c2cModeInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 143, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":142
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetC2cModeInfoV(nvmlDevice_t device, nvmlC2cModeInfo_v1_t* c2cModeInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetC2cModeInfoV(device, c2cModeInfo)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetC2cModeInfoV", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":146
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryAffinity(nvmlDevice_t device, unsigned int nodeSetSize, unsigned long* nodeSet, nvmlAffinityScope_t scope) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryAffinity(device, nodeSetSize, nodeSet, scope)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetMemoryAffinity (cy_nvml.pyx:146):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryAffinity(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_nodeSetSize, unsigned long *__pyx_v_nodeSet, nvmlAffinityScope_t __pyx_v_scope) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":147
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryAffinity(nvmlDevice_t device, unsigned int nodeSetSize, unsigned long* nodeSet, nvmlAffinityScope_t scope) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMemoryAffinity(device, nodeSetSize, nodeSet, scope)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryAffinity(__pyx_v_device, __pyx_v_nodeSetSize, __pyx_v_nodeSet, __pyx_v_scope); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 147, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":146
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryAffinity(nvmlDevice_t device, unsigned int nodeSetSize, unsigned long* nodeSet, nvmlAffinityScope_t scope) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryAffinity(device, nodeSetSize, nodeSet, scope)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMemoryAffinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":150
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long* cpuSet, nvmlAffinityScope_t scope) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCpuAffinityWithinScope(device, cpuSetSize, cpuSet, scope)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetCpuAffinityWithinScope
 * (cy_nvml.pyx:150): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_cpuSetSize, unsigned long *__pyx_v_cpuSet, nvmlAffinityScope_t __pyx_v_scope) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":151
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long* cpuSet, nvmlAffinityScope_t scope) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCpuAffinityWithinScope(device, cpuSetSize, cpuSet, scope)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCpuAffinityWithinScope(__pyx_v_device, __pyx_v_cpuSetSize, __pyx_v_cpuSet, __pyx_v_scope); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 151, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":150
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCpuAffinityWithinScope(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long* cpuSet, nvmlAffinityScope_t scope) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCpuAffinityWithinScope(device, cpuSetSize, cpuSet, scope)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCpuAffinityWithinScope", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":154
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long* cpuSet) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCpuAffinity(device, cpuSetSize, cpuSet)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetCpuAffinity (cy_nvml.pyx:154):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinity(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_cpuSetSize, unsigned long *__pyx_v_cpuSet) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":155
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long* cpuSet) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCpuAffinity(device, cpuSetSize, cpuSet)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCpuAffinity(__pyx_v_device, __pyx_v_cpuSetSize, __pyx_v_cpuSet); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 155, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":154
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long* cpuSet) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCpuAffinity(device, cpuSetSize, cpuSet)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCpuAffinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":158
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetCpuAffinity(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetCpuAffinity(device)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceSetCpuAffinity (cy_nvml.pyx:158):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetCpuAffinity(nvmlDevice_t __pyx_v_device) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":159
 * 
 * cdef nvmlReturn_t nvmlDeviceSetCpuAffinity(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetCpuAffinity(device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetCpuAffinity(__pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 159, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":158
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetCpuAffinity(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetCpuAffinity(device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetCpuAffinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":162
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearCpuAffinity(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearCpuAffinity(device)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceClearCpuAffinity (cy_nvml.pyx:162):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearCpuAffinity(nvmlDevice_t __pyx_v_device) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":163
 * 
 * cdef nvmlReturn_t nvmlDeviceClearCpuAffinity(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceClearCpuAffinity(device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearCpuAffinity(__pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 163, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":162
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearCpuAffinity(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearCpuAffinity(device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceClearCpuAffinity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":166
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNumaNodeId(nvmlDevice_t device, unsigned int* node) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNumaNodeId(device, node)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetNumaNodeId (cy_nvml.pyx:166):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumaNodeId(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_node) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":167
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNumaNodeId(nvmlDevice_t device, unsigned int* node) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNumaNodeId(device, node)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumaNodeId(__pyx_v_device, __pyx_v_node); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 167, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":166
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNumaNodeId(nvmlDevice_t device, unsigned int* node) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNumaNodeId(device, node)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNumaNodeId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":170
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t* pathInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetTopologyCommonAncestor(device1, device2, pathInfo)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetTopologyCommonAncestor
 * (cy_nvml.pyx:170): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t __pyx_v_device1, nvmlDevice_t __pyx_v_device2, nvmlGpuTopologyLevel_t *__pyx_v_pathInfo) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":171
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t* pathInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetTopologyCommonAncestor(device1, device2, pathInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTopologyCommonAncestor(__pyx_v_device1, __pyx_v_device2, __pyx_v_pathInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 171, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":170
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t* pathInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetTopologyCommonAncestor(device1, device2, pathInfo)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTopologyCommonAncestor", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":174
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int* count, nvmlDevice_t* deviceArray) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetTopologyNearestGpus(device, level, count, deviceArray)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetTopologyNearestGpus
 * (cy_nvml.pyx:174): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t __pyx_v_device, nvmlGpuTopologyLevel_t __pyx_v_level, unsigned int *__pyx_v_count, nvmlDevice_t *__pyx_v_deviceArray) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":175
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int* count, nvmlDevice_t* deviceArray) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetTopologyNearestGpus(device, level, count, deviceArray)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTopologyNearestGpus(__pyx_v_device, __pyx_v_level, __pyx_v_count, __pyx_v_deviceArray); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 175, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":174
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int* count, nvmlDevice_t* deviceArray) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetTopologyNearestGpus(device, level, count, deviceArray)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTopologyNearestGpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":178
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex, nvmlGpuP2PStatus_t* p2pStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetP2PStatus(device1, device2, p2pIndex, p2pStatus)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetP2PStatus (cy_nvml.pyx:178):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetP2PStatus(nvmlDevice_t __pyx_v_device1, nvmlDevice_t __pyx_v_device2, nvmlGpuP2PCapsIndex_t __pyx_v_p2pIndex, nvmlGpuP2PStatus_t *__pyx_v_p2pStatus) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":179
 * 
 * cdef nvmlReturn_t nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex, nvmlGpuP2PStatus_t* p2pStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetP2PStatus(device1, device2, p2pIndex, p2pStatus)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetP2PStatus(__pyx_v_device1, __pyx_v_device2, __pyx_v_p2pIndex, __pyx_v_p2pStatus); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 179, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":178
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex, nvmlGpuP2PStatus_t* p2pStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetP2PStatus(device1, device2, p2pIndex, p2pStatus)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetP2PStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":182
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetUUID(nvmlDevice_t device, char* uuid, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetUUID(device, uuid, length)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetUUID (cy_nvml.pyx:182):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUUID(nvmlDevice_t __pyx_v_device, char *__pyx_v_uuid, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":183
 * 
 * cdef nvmlReturn_t nvmlDeviceGetUUID(nvmlDevice_t device, char* uuid, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetUUID(device, uuid, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetUUID(__pyx_v_device, __pyx_v_uuid, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 183, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":182
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetUUID(nvmlDevice_t device, char* uuid, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetUUID(device, uuid, length)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetUUID", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":186
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int* minorNumber) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMinorNumber(device, minorNumber)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetMinorNumber (cy_nvml.pyx:186):
 * forwards to the _internal._nvml loader shim. The `except?` sentinel is
 * ambiguous, so the error path is taken only when the sentinel is returned
 * AND a Python exception is pending (__Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinorNumber(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_minorNumber) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":187
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int* minorNumber) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMinorNumber(device, minorNumber)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinorNumber(__pyx_v_device, __pyx_v_minorNumber); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 187, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":186
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int* minorNumber) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMinorNumber(device, minorNumber)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMinorNumber", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":190
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBoardPartNumber(device, partNumber, length)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetBoardPartNumber
 * (cy_nvml.pyx:190): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardPartNumber(nvmlDevice_t __pyx_v_device, char *__pyx_v_partNumber, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":191
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetBoardPartNumber(device, partNumber, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBoardPartNumber(__pyx_v_device, __pyx_v_partNumber, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 191, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":190
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBoardPartNumber(device, partNumber, length)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetBoardPartNumber", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":194
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetInforomVersion(device, object, version, length)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetInforomVersion
 * (cy_nvml.pyx:194): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomVersion(nvmlDevice_t __pyx_v_device, nvmlInforomObject_t __pyx_v_object, char *__pyx_v_version, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":195
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetInforomVersion(device, object, version, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomVersion(__pyx_v_device, __pyx_v_object, __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 195, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":194
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetInforomVersion(device, object, version, length)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetInforomVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":198
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetInforomImageVersion(device, version, length)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetInforomImageVersion
 * (cy_nvml.pyx:198): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomImageVersion(nvmlDevice_t __pyx_v_device, char *__pyx_v_version, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":199
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetInforomImageVersion(device, version, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomImageVersion(__pyx_v_device, __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 199, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":198
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetInforomImageVersion(device, version, length)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetInforomImageVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":202
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int* checksum) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetInforomConfigurationChecksum(device, checksum)
 * 
*/

/* Cython-generated wrapper for cdef nvmlDeviceGetInforomConfigurationChecksum
 * (cy_nvml.pyx:202): forwards to the _internal._nvml loader shim. The
 * `except?` sentinel is ambiguous, so the error path is taken only when the
 * sentinel is returned AND a Python exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_checksum) {
  nvmlReturn_t __pyx_r;   /* function result */
  nvmlReturn_t __pyx_t_1; /* temporary for the forwarded call's result */
  int __pyx_lineno = 0;   /* error location; presumably set by __PYX_ERR — Cython convention */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save; /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":203
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int* checksum) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetInforomConfigurationChecksum(device, checksum)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomConfigurationChecksum(__pyx_v_device, __pyx_v_checksum); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 203, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-handling block below */

  /* "cuda/bindings/cy_nvml.pyx":202
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int* checksum) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetInforomConfigurationChecksum(device, checksum)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached via __PYX_ERR): acquire the GIL (function runs
   * nogil), append a traceback frame, and return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetInforomConfigurationChecksum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":206
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceValidateInforom(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceValidateInforom(device)
 * 
*/

/* Cython-generated trampoline: forwards (device) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceValidateInforom(nvmlDevice_t __pyx_v_device) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":207
 * 
 * cdef nvmlReturn_t nvmlDeviceValidateInforom(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceValidateInforom(device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceValidateInforom(__pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 207, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":206
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceValidateInforom(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceValidateInforom(device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceValidateInforom", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":210
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetLastBBXFlushTime(nvmlDevice_t device, unsigned long long* timestamp, unsigned long* durationUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetLastBBXFlushTime(device, timestamp, durationUs)
 * 
*/

/* Cython-generated trampoline: forwards (device, timestamp, durationUs) to
 * the internal `cuda.bindings._internal._nvml` binding and returns its
 * status. Runs without the GIL; per the Cython `except?` contract, the
 * sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a
 * Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetLastBBXFlushTime(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG *__pyx_v_timestamp, unsigned long *__pyx_v_durationUs) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":211
 * 
 * cdef nvmlReturn_t nvmlDeviceGetLastBBXFlushTime(nvmlDevice_t device, unsigned long long* timestamp, unsigned long* durationUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetLastBBXFlushTime(device, timestamp, durationUs)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetLastBBXFlushTime(__pyx_v_device, __pyx_v_timestamp, __pyx_v_durationUs); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 211, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":210
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetLastBBXFlushTime(nvmlDevice_t device, unsigned long long* timestamp, unsigned long* durationUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetLastBBXFlushTime(device, timestamp, durationUs)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetLastBBXFlushTime", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":214
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t* display) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDisplayMode(device, display)
 * 
*/

/* Cython-generated trampoline: forwards (device, display) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_display) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":215
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t* display) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetDisplayMode(device, display)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDisplayMode(__pyx_v_device, __pyx_v_display); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 215, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":214
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t* display) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDisplayMode(device, display)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDisplayMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":218
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t* isActive) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDisplayActive(device, isActive)
 * 
*/

/* Cython-generated trampoline: forwards (device, isActive) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayActive(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_isActive) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":219
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t* isActive) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetDisplayActive(device, isActive)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDisplayActive(__pyx_v_device, __pyx_v_isActive); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 219, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":218
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t* isActive) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDisplayActive(device, isActive)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDisplayActive", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":222
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPersistenceMode(device, mode)
 * 
*/

/* Cython-generated trampoline: forwards (device, mode) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPersistenceMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_mode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":223
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPersistenceMode(device, mode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPersistenceMode(__pyx_v_device, __pyx_v_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 223, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":222
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPersistenceMode(device, mode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPersistenceMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":226
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPciInfoExt(nvmlDevice_t device, nvmlPciInfoExt_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPciInfoExt(device, pci)
 * 
*/

/* Cython-generated trampoline: forwards (device, pci) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfoExt(nvmlDevice_t __pyx_v_device, nvmlPciInfoExt_t *__pyx_v_pci) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":227
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPciInfoExt(nvmlDevice_t device, nvmlPciInfoExt_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPciInfoExt(device, pci)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPciInfoExt(__pyx_v_device, __pyx_v_pci); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 227, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":226
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPciInfoExt(nvmlDevice_t device, nvmlPciInfoExt_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPciInfoExt(device, pci)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPciInfoExt", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":230
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPciInfo_v3(nvmlDevice_t device, nvmlPciInfo_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPciInfo_v3(device, pci)
 * 
*/

/* Cython-generated trampoline: forwards (device, pci) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfo_v3(nvmlDevice_t __pyx_v_device, nvmlPciInfo_t *__pyx_v_pci) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":231
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPciInfo_v3(nvmlDevice_t device, nvmlPciInfo_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPciInfo_v3(device, pci)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPciInfo_v3(__pyx_v_device, __pyx_v_pci); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 231, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":230
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPciInfo_v3(nvmlDevice_t device, nvmlPciInfo_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPciInfo_v3(device, pci)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPciInfo_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":234
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int* maxLinkGen) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxPcieLinkGeneration(device, maxLinkGen)
 * 
*/

/* Cython-generated trampoline: forwards (device, maxLinkGen) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_maxLinkGen) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":235
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int* maxLinkGen) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMaxPcieLinkGeneration(device, maxLinkGen)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxPcieLinkGeneration(__pyx_v_device, __pyx_v_maxLinkGen); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 235, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":234
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int* maxLinkGen) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxPcieLinkGeneration(device, maxLinkGen)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMaxPcieLinkGeneration", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":238
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int* maxLinkGenDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuMaxPcieLinkGeneration(device, maxLinkGenDevice)
 * 
*/

/* Cython-generated trampoline: forwards (device, maxLinkGenDevice) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_maxLinkGenDevice) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":239
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int* maxLinkGenDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGpuMaxPcieLinkGeneration(device, maxLinkGenDevice)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuMaxPcieLinkGeneration(__pyx_v_device, __pyx_v_maxLinkGenDevice); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 239, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":238
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int* maxLinkGenDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuMaxPcieLinkGeneration(device, maxLinkGenDevice)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuMaxPcieLinkGeneration", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":242
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int* maxLinkWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxPcieLinkWidth(device, maxLinkWidth)
 * 
*/

/* Cython-generated trampoline: forwards (device, maxLinkWidth) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_maxLinkWidth) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":243
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int* maxLinkWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMaxPcieLinkWidth(device, maxLinkWidth)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxPcieLinkWidth(__pyx_v_device, __pyx_v_maxLinkWidth); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 243, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":242
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int* maxLinkWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxPcieLinkWidth(device, maxLinkWidth)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMaxPcieLinkWidth", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":246
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int* currLinkGen) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrPcieLinkGeneration(device, currLinkGen)
 * 
*/

/* Cython-generated trampoline: forwards (device, currLinkGen) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_currLinkGen) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":247
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int* currLinkGen) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCurrPcieLinkGeneration(device, currLinkGen)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrPcieLinkGeneration(__pyx_v_device, __pyx_v_currLinkGen); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 247, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":246
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int* currLinkGen) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrPcieLinkGeneration(device, currLinkGen)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCurrPcieLinkGeneration", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":250
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int* currLinkWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrPcieLinkWidth(device, currLinkWidth)
 * 
*/

/* Cython-generated trampoline: forwards (device, currLinkWidth) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_currLinkWidth) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":251
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int* currLinkWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCurrPcieLinkWidth(device, currLinkWidth)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrPcieLinkWidth(__pyx_v_device, __pyx_v_currLinkWidth); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 251, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":250
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int* currLinkWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrPcieLinkWidth(device, currLinkWidth)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCurrPcieLinkWidth", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":254
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int* value) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieThroughput(device, counter, value)
 * 
*/

/* Cython-generated trampoline: forwards (device, counter, value) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieThroughput(nvmlDevice_t __pyx_v_device, nvmlPcieUtilCounter_t __pyx_v_counter, unsigned int *__pyx_v_value) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":255
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int* value) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPcieThroughput(device, counter, value)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieThroughput(__pyx_v_device, __pyx_v_counter, __pyx_v_value); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 255, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":254
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int* value) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieThroughput(device, counter, value)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPcieThroughput", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":258
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int* value) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieReplayCounter(device, value)
 * 
*/

/* Cython-generated trampoline: forwards (device, value) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieReplayCounter(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_value) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":259
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int* value) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPcieReplayCounter(device, value)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieReplayCounter(__pyx_v_device, __pyx_v_value); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 259, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":258
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int* value) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieReplayCounter(device, value)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPcieReplayCounter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":262
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int* clock) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetClockInfo(device, type, clock)
 * 
*/

/* Cython-generated trampoline: forwards (device, type, clock) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockInfo(nvmlDevice_t __pyx_v_device, nvmlClockType_t __pyx_v_type, unsigned int *__pyx_v_clock) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":263
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int* clock) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetClockInfo(device, type, clock)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClockInfo(__pyx_v_device, __pyx_v_type, __pyx_v_clock); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 263, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":262
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int* clock) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetClockInfo(device, type, clock)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetClockInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":266
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int* clock) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxClockInfo(device, type, clock)
 * 
*/

/* Cython-generated trampoline: forwards (device, type, clock) to the
 * internal `cuda.bindings._internal._nvml` binding and returns its status.
 * Runs without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxClockInfo(nvmlDevice_t __pyx_v_device, nvmlClockType_t __pyx_v_type, unsigned int *__pyx_v_clock) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":267
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int* clock) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMaxClockInfo(device, type, clock)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxClockInfo(__pyx_v_device, __pyx_v_type, __pyx_v_clock); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 267, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":266
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int* clock) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxClockInfo(device, type, clock)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMaxClockInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":270
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int* offset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpcClkVfOffset(device, offset)
 * 
*/

/* Cython-generated trampoline: forwards (device, offset) to the internal
 * `cuda.bindings._internal._nvml` binding and returns its status. Runs
 * without the GIL; per the Cython `except?` contract, the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR signals failure only when a Python
 * exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t __pyx_v_device, int *__pyx_v_offset) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":271
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int* offset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGpcClkVfOffset(device, offset)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Sentinel doubles as a legal return value, so only treat it as an error
   * when a Python exception is pending (checked under the GIL); __PYX_ERR
   * presumably records the location and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpcClkVfOffset(__pyx_v_device, __pyx_v_offset); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 271, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0; /* success: skip the error-reporting block below */

  /* "cuda/bindings/cy_nvml.pyx":270
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int* offset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpcClkVfOffset(device, offset)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path (reached only via __PYX_ERR): re-acquire the GIL to append a
   * traceback entry, then return the sentinel to the caller. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpcClkVfOffset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* cy_nvml.pyx:274 — generated shim for nvmlDeviceGetClock: forwards to
 * the internal _nvml implementation and relays its status code.  The
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR value is also the Cython
 * exception sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClock(nvmlDevice_t __pyx_v_device, nvmlClockType_t __pyx_v_clockType, nvmlClockId_t __pyx_v_clockId, unsigned int *__pyx_v_clockMHz) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClock(__pyx_v_device, __pyx_v_clockType, __pyx_v_clockId, __pyx_v_clockMHz);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 275, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetClock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:278 — generated shim for nvmlDeviceGetMaxCustomerBoostClock:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t __pyx_v_device, nvmlClockType_t __pyx_v_clockType, unsigned int *__pyx_v_clockMHz) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxCustomerBoostClock(__pyx_v_device, __pyx_v_clockType, __pyx_v_clockMHz);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 279, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMaxCustomerBoostClock", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:282 — generated shim for nvmlDeviceGetSupportedMemoryClocks:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_count, unsigned int *__pyx_v_clocksMHz) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedMemoryClocks(__pyx_v_device, __pyx_v_count, __pyx_v_clocksMHz);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 283, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSupportedMemoryClocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:286 — generated shim for nvmlDeviceGetSupportedGraphicsClocks:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_memoryClockMHz, unsigned int *__pyx_v_count, unsigned int *__pyx_v_clocksMHz) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedGraphicsClocks(__pyx_v_device, __pyx_v_memoryClockMHz, __pyx_v_count, __pyx_v_clocksMHz);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 287, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSupportedGraphicsClocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:290 — generated shim for nvmlDeviceGetAutoBoostedClocksEnabled:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_isEnabled, nvmlEnableState_t *__pyx_v_defaultIsEnabled) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAutoBoostedClocksEnabled(__pyx_v_device, __pyx_v_isEnabled, __pyx_v_defaultIsEnabled);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 291, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAutoBoostedClocksEnabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:294 — generated shim for nvmlDeviceGetFanSpeed: forwards
 * to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_speed) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeed(__pyx_v_device, __pyx_v_speed);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 295, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFanSpeed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:298 — generated shim for nvmlDeviceGetFanSpeed_v2:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed_v2(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int *__pyx_v_speed) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeed_v2(__pyx_v_device, __pyx_v_fan, __pyx_v_speed);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 299, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFanSpeed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:302 — generated shim for nvmlDeviceGetFanSpeedRPM:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeedRPM(nvmlDevice_t __pyx_v_device, nvmlFanSpeedInfo_t *__pyx_v_fanSpeed) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeedRPM(__pyx_v_device, __pyx_v_fanSpeed);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 303, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFanSpeedRPM", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:306 — generated shim for nvmlDeviceGetTargetFanSpeed:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTargetFanSpeed(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int *__pyx_v_targetSpeed) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTargetFanSpeed(__pyx_v_device, __pyx_v_fan, __pyx_v_targetSpeed);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 307, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTargetFanSpeed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:310 — generated shim for nvmlDeviceGetMinMaxFanSpeed:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxFanSpeed(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_minSpeed, unsigned int *__pyx_v_maxSpeed) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinMaxFanSpeed(__pyx_v_device, __pyx_v_minSpeed, __pyx_v_maxSpeed);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 311, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMinMaxFanSpeed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:314 — generated shim for nvmlDeviceGetFanControlPolicy_v2:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanControlPolicy_v2(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_fan, nvmlFanControlPolicy_t *__pyx_v_policy) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanControlPolicy_v2(__pyx_v_device, __pyx_v_fan, __pyx_v_policy);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 315, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFanControlPolicy_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:318 — generated shim for nvmlDeviceGetNumFans: forwards
 * to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumFans(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_numFans) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumFans(__pyx_v_device, __pyx_v_numFans);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 319, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNumFans", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:322 — generated shim for nvmlDeviceGetCoolerInfo:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCoolerInfo(nvmlDevice_t __pyx_v_device, nvmlCoolerInfo_t *__pyx_v_coolerInfo) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCoolerInfo(__pyx_v_device, __pyx_v_coolerInfo);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 323, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCoolerInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:326 — generated shim for nvmlDeviceGetTemperatureV:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureV(nvmlDevice_t __pyx_v_device, nvmlTemperature_t *__pyx_v_temperature) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTemperatureV(__pyx_v_device, __pyx_v_temperature);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 327, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTemperatureV", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:330 — generated shim for nvmlDeviceGetTemperatureThreshold:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureThreshold(nvmlDevice_t __pyx_v_device, nvmlTemperatureThresholds_t __pyx_v_thresholdType, unsigned int *__pyx_v_temp) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTemperatureThreshold(__pyx_v_device, __pyx_v_thresholdType, __pyx_v_temp);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 331, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTemperatureThreshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* cy_nvml.pyx:334 — generated shim for nvmlDeviceGetMarginTemperature:
 * forwards to the internal _nvml implementation and relays its status.
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR doubles as the Cython exception
 * sentinel, so it triggers a GIL-guarded exception probe. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMarginTemperature(nvmlDevice_t __pyx_v_device, nvmlMarginTemperature_t *__pyx_v_marginTempInfo) {
  nvmlReturn_t __pyx_status;
  /* Written by __PYX_ERR; names are fixed by that macro's contract. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMarginTemperature(__pyx_v_device, __pyx_v_marginTempInfo);
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 335, __pyx_L1_error)
  return __pyx_status;

  /* Exception path: take the GIL, add a traceback frame, and return
   * the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gil = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMarginTemperature", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":338
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t* pThermalSettings) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetThermalSettings(device, sensorIndex, pThermalSettings)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetThermalSettings to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetThermalSettings(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_sensorIndex, nvmlGpuThermalSettings_t *__pyx_v_pThermalSettings) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":339
 * 
 * cdef nvmlReturn_t nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t* pThermalSettings) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetThermalSettings(device, sensorIndex, pThermalSettings)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetThermalSettings(__pyx_v_device, __pyx_v_sensorIndex, __pyx_v_pThermalSettings); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 339, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":338
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t* pThermalSettings) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetThermalSettings(device, sensorIndex, pThermalSettings)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetThermalSettings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":342
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPerformanceState(device, pState)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetPerformanceState to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceState(nvmlDevice_t __pyx_v_device, nvmlPstates_t *__pyx_v_pState) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":343
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPerformanceState(device, pState)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPerformanceState(__pyx_v_device, __pyx_v_pState); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 343, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":342
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPerformanceState(device, pState)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPerformanceState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":346
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice_t device, unsigned long long* clocksEventReasons) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrentClocksEventReasons(device, clocksEventReasons)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetCurrentClocksEventReasons to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG *__pyx_v_clocksEventReasons) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":347
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice_t device, unsigned long long* clocksEventReasons) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCurrentClocksEventReasons(device, clocksEventReasons)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrentClocksEventReasons(__pyx_v_device, __pyx_v_clocksEventReasons); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 347, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":346
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrentClocksEventReasons(nvmlDevice_t device, unsigned long long* clocksEventReasons) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrentClocksEventReasons(device, clocksEventReasons)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCurrentClocksEventReasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":350
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice_t device, unsigned long long* supportedClocksEventReasons) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedClocksEventReasons(device, supportedClocksEventReasons)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetSupportedClocksEventReasons to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG *__pyx_v_supportedClocksEventReasons) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":351
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice_t device, unsigned long long* supportedClocksEventReasons) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetSupportedClocksEventReasons(device, supportedClocksEventReasons)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedClocksEventReasons(__pyx_v_device, __pyx_v_supportedClocksEventReasons); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 351, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":350
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedClocksEventReasons(nvmlDevice_t device, unsigned long long* supportedClocksEventReasons) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedClocksEventReasons(device, supportedClocksEventReasons)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSupportedClocksEventReasons", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":354
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerState(device, pState)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetPowerState to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerState(nvmlDevice_t __pyx_v_device, nvmlPstates_t *__pyx_v_pState) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":355
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPowerState(device, pState)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerState(__pyx_v_device, __pyx_v_pState); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 355, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":354
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerState(device, pState)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":358
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t* pDynamicPstatesInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDynamicPstatesInfo(device, pDynamicPstatesInfo)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetDynamicPstatesInfo to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t __pyx_v_device, nvmlGpuDynamicPstatesInfo_t *__pyx_v_pDynamicPstatesInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":359
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t* pDynamicPstatesInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetDynamicPstatesInfo(device, pDynamicPstatesInfo)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDynamicPstatesInfo(__pyx_v_device, __pyx_v_pDynamicPstatesInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 359, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":358
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t* pDynamicPstatesInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDynamicPstatesInfo(device, pDynamicPstatesInfo)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDynamicPstatesInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":362
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int* offset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemClkVfOffset(device, offset)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetMemClkVfOffset to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkVfOffset(nvmlDevice_t __pyx_v_device, int *__pyx_v_offset) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":363
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int* offset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMemClkVfOffset(device, offset)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemClkVfOffset(__pyx_v_device, __pyx_v_offset); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 363, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":362
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int* offset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemClkVfOffset(device, offset)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMemClkVfOffset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":366
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate, unsigned int* minClockMHz, unsigned int* maxClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMinMaxClockOfPState(device, type, pstate, minClockMHz, maxClockMHz)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetMinMaxClockOfPState to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t __pyx_v_device, nvmlClockType_t __pyx_v_type, nvmlPstates_t __pyx_v_pstate, unsigned int *__pyx_v_minClockMHz, unsigned int *__pyx_v_maxClockMHz) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":367
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate, unsigned int* minClockMHz, unsigned int* maxClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMinMaxClockOfPState(device, type, pstate, minClockMHz, maxClockMHz)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinMaxClockOfPState(__pyx_v_device, __pyx_v_type, __pyx_v_pstate, __pyx_v_minClockMHz, __pyx_v_maxClockMHz); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 367, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":366
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate, unsigned int* minClockMHz, unsigned int* maxClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMinMaxClockOfPState(device, type, pstate, minClockMHz, maxClockMHz)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMinMaxClockOfPState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":370
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device, nvmlPstates_t* pstates, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedPerformanceStates(device, pstates, size)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetSupportedPerformanceStates to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t __pyx_v_device, nvmlPstates_t *__pyx_v_pstates, unsigned int __pyx_v_size) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":371
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device, nvmlPstates_t* pstates, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetSupportedPerformanceStates(device, pstates, size)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedPerformanceStates(__pyx_v_device, __pyx_v_pstates, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 371, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":370
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device, nvmlPstates_t* pstates, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedPerformanceStates(device, pstates, size)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSupportedPerformanceStates", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":374
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device, int* minOffset, int* maxOffset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpcClkMinMaxVfOffset(device, minOffset, maxOffset)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetGpcClkMinMaxVfOffset to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t __pyx_v_device, int *__pyx_v_minOffset, int *__pyx_v_maxOffset) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":375
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device, int* minOffset, int* maxOffset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGpcClkMinMaxVfOffset(device, minOffset, maxOffset)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpcClkMinMaxVfOffset(__pyx_v_device, __pyx_v_minOffset, __pyx_v_maxOffset); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 375, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":374
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device, int* minOffset, int* maxOffset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpcClkMinMaxVfOffset(device, minOffset, maxOffset)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpcClkMinMaxVfOffset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":378
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device, int* minOffset, int* maxOffset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemClkMinMaxVfOffset(device, minOffset, maxOffset)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetMemClkMinMaxVfOffset to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t __pyx_v_device, int *__pyx_v_minOffset, int *__pyx_v_maxOffset) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":379
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device, int* minOffset, int* maxOffset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMemClkMinMaxVfOffset(device, minOffset, maxOffset)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemClkMinMaxVfOffset(__pyx_v_device, __pyx_v_minOffset, __pyx_v_maxOffset); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 379, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":378
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device, int* minOffset, int* maxOffset) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemClkMinMaxVfOffset(device, minOffset, maxOffset)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMemClkMinMaxVfOffset", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":382
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetClockOffsets(device, info)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetClockOffsets to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockOffsets(nvmlDevice_t __pyx_v_device, nvmlClockOffset_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":383
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetClockOffsets(device, info)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClockOffsets(__pyx_v_device, __pyx_v_info); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 383, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":382
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetClockOffsets(device, info)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetClockOffsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":386
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetClockOffsets(device, info)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceSetClockOffsets to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetClockOffsets(nvmlDevice_t __pyx_v_device, nvmlClockOffset_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":387
 * 
 * cdef nvmlReturn_t nvmlDeviceSetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetClockOffsets(device, info)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetClockOffsets(__pyx_v_device, __pyx_v_info); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 387, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":386
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetClockOffsets(nvmlDevice_t device, nvmlClockOffset_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetClockOffsets(device, info)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetClockOffsets", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":390
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPerformanceModes(nvmlDevice_t device, nvmlDevicePerfModes_t* perfModes) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPerformanceModes(device, perfModes)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetPerformanceModes to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceModes(nvmlDevice_t __pyx_v_device, nvmlDevicePerfModes_t *__pyx_v_perfModes) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":391
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPerformanceModes(nvmlDevice_t device, nvmlDevicePerfModes_t* perfModes) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPerformanceModes(device, perfModes)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPerformanceModes(__pyx_v_device, __pyx_v_perfModes); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 391, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":390
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPerformanceModes(nvmlDevice_t device, nvmlDevicePerfModes_t* perfModes) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPerformanceModes(device, perfModes)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPerformanceModes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":394
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrentClockFreqs(nvmlDevice_t device, nvmlDeviceCurrentClockFreqs_t* currentClockFreqs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrentClockFreqs(device, currentClockFreqs)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetCurrentClockFreqs to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClockFreqs(nvmlDevice_t __pyx_v_device, nvmlDeviceCurrentClockFreqs_t *__pyx_v_currentClockFreqs) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":395
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrentClockFreqs(nvmlDevice_t device, nvmlDeviceCurrentClockFreqs_t* currentClockFreqs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCurrentClockFreqs(device, currentClockFreqs)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrentClockFreqs(__pyx_v_device, __pyx_v_currentClockFreqs); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 395, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":394
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCurrentClockFreqs(nvmlDevice_t device, nvmlDeviceCurrentClockFreqs_t* currentClockFreqs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCurrentClockFreqs(device, currentClockFreqs)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCurrentClockFreqs", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":398
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int* limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerManagementLimit(device, limit)
 * 
*/

/* Cython-generated nogil wrapper: forwards nvmlDeviceGetPowerManagementLimit to the
 * lazily-loaded internal NVML binding.  Per the "except? _NVMLRETURN_T_INTERNAL_LOADING_ERROR"
 * contract, the sentinel return value signals a Python error only when an
 * exception is also pending (checked with the GIL via __Pyx_ErrOccurredWithGIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimit(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_limit) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":399
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int* limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPowerManagementLimit(device, limit)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* On sentinel + pending exception, __PYX_ERR records file/line and jumps to __pyx_L1_error. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementLimit(__pyx_v_device, __pyx_v_limit); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 399, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":398
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int* limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerManagementLimit(device, limit)
 * 
*/

  /* function exit code */
  /* Error path: this function runs without the GIL, so it must be re-acquired
   * before recording the Python traceback; the sentinel is then returned. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerManagementLimit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":402
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int* minLimit, unsigned int* maxLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerManagementLimitConstraints(device, minLimit, maxLimit)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_minLimit, unsigned int *__pyx_v_maxLimit) {
  /* nogil wrapper for cy_nvml.pyx:402 — forwards to the internal
   * _nvml._nvmlDeviceGetPowerManagementLimitConstraints shim and converts
   * its internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementLimitConstraints(__pyx_v_device, __pyx_v_minLimit, __pyx_v_maxLimit);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * indicates an internal loading failure rather than a normal NVML code. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 403, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerManagementLimitConstraints", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":406
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int* defaultLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerManagementDefaultLimit(device, defaultLimit)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_defaultLimit) {
  /* nogil wrapper for cy_nvml.pyx:406 — forwards to the internal
   * _nvml._nvmlDeviceGetPowerManagementDefaultLimit shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementDefaultLimit(__pyx_v_device, __pyx_v_defaultLimit);
  /* Distinguish a real loading failure from an ordinary return value:
   * the sentinel must coincide with a pending Python exception. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 407, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerManagementDefaultLimit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":410
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int* power) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerUsage(device, power)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerUsage(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_power) {
  /* nogil wrapper for cy_nvml.pyx:410 — forwards to the internal
   * _nvml._nvmlDeviceGetPowerUsage shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerUsage(__pyx_v_device, __pyx_v_power);
  /* Only treat the sentinel as fatal when a Python exception is pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 411, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerUsage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":414
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long* energy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetTotalEnergyConsumption(device, energy)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG *__pyx_v_energy) {
  /* nogil wrapper for cy_nvml.pyx:414 — forwards to the internal
   * _nvml._nvmlDeviceGetTotalEnergyConsumption shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTotalEnergyConsumption(__pyx_v_device, __pyx_v_energy);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 415, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTotalEnergyConsumption", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":418
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int* limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEnforcedPowerLimit(device, limit)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_limit) {
  /* nogil wrapper for cy_nvml.pyx:418 — forwards to the internal
   * _nvml._nvmlDeviceGetEnforcedPowerLimit shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEnforcedPowerLimit(__pyx_v_device, __pyx_v_limit);
  /* The sentinel is only fatal when a Python exception is also pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 419, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetEnforcedPowerLimit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":422
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t* current, nvmlGpuOperationMode_t* pending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuOperationMode(device, current, pending)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuOperationMode(nvmlDevice_t __pyx_v_device, nvmlGpuOperationMode_t *__pyx_v_current, nvmlGpuOperationMode_t *__pyx_v_pending) {
  /* nogil wrapper for cy_nvml.pyx:422 — forwards to the internal
   * _nvml._nvmlDeviceGetGpuOperationMode shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuOperationMode(__pyx_v_device, __pyx_v_current, __pyx_v_pending);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 423, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuOperationMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":426
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryInfo_v2(nvmlDevice_t device, nvmlMemory_v2_t* memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryInfo_v2(device, memory)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryInfo_v2(nvmlDevice_t __pyx_v_device, nvmlMemory_v2_t *__pyx_v_memory) {
  /* nogil wrapper for cy_nvml.pyx:426 — forwards to the internal
   * _nvml._nvmlDeviceGetMemoryInfo_v2 shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryInfo_v2(__pyx_v_device, __pyx_v_memory);
  /* The sentinel is only fatal when a Python exception is also pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 427, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMemoryInfo_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":430
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetComputeMode(device, mode)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeMode(nvmlDevice_t __pyx_v_device, nvmlComputeMode_t *__pyx_v_mode) {
  /* nogil wrapper for cy_nvml.pyx:430 — forwards to the internal
   * _nvml._nvmlDeviceGetComputeMode shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeMode(__pyx_v_device, __pyx_v_mode);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 431, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetComputeMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":434
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int* major, int* minor) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCudaComputeCapability(device, major, minor)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCudaComputeCapability(nvmlDevice_t __pyx_v_device, int *__pyx_v_major, int *__pyx_v_minor) {
  /* nogil wrapper for cy_nvml.pyx:434 — forwards to the internal
   * _nvml._nvmlDeviceGetCudaComputeCapability shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCudaComputeCapability(__pyx_v_device, __pyx_v_major, __pyx_v_minor);
  /* The sentinel is only fatal when a Python exception is also pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 435, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCudaComputeCapability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":438
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDramEncryptionMode(nvmlDevice_t device, nvmlDramEncryptionInfo_t* current, nvmlDramEncryptionInfo_t* pending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDramEncryptionMode(device, current, pending)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDramEncryptionMode(nvmlDevice_t __pyx_v_device, nvmlDramEncryptionInfo_t *__pyx_v_current, nvmlDramEncryptionInfo_t *__pyx_v_pending) {
  /* nogil wrapper for cy_nvml.pyx:438 — forwards to the internal
   * _nvml._nvmlDeviceGetDramEncryptionMode shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDramEncryptionMode(__pyx_v_device, __pyx_v_current, __pyx_v_pending);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 439, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDramEncryptionMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":442
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDramEncryptionMode(nvmlDevice_t device, const nvmlDramEncryptionInfo_t* dramEncryption) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDramEncryptionMode(device, dramEncryption)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDramEncryptionMode(nvmlDevice_t __pyx_v_device, nvmlDramEncryptionInfo_t const *__pyx_v_dramEncryption) {
  /* nogil wrapper for cy_nvml.pyx:442 — forwards to the internal
   * _nvml._nvmlDeviceSetDramEncryptionMode shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDramEncryptionMode(__pyx_v_device, __pyx_v_dramEncryption);
  /* The sentinel is only fatal when a Python exception is also pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 443, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetDramEncryptionMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":446
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t* current, nvmlEnableState_t* pending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEccMode(device, current, pending)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEccMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_current, nvmlEnableState_t *__pyx_v_pending) {
  /* nogil wrapper for cy_nvml.pyx:446 — forwards to the internal
   * _nvml._nvmlDeviceGetEccMode shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEccMode(__pyx_v_device, __pyx_v_current, __pyx_v_pending);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 447, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetEccMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":450
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDefaultEccMode(nvmlDevice_t device, nvmlEnableState_t* defaultMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDefaultEccMode(device, defaultMode)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDefaultEccMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_defaultMode) {
  /* nogil wrapper for cy_nvml.pyx:450 — forwards to the internal
   * _nvml._nvmlDeviceGetDefaultEccMode shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDefaultEccMode(__pyx_v_device, __pyx_v_defaultMode);
  /* The sentinel is only fatal when a Python exception is also pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 451, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDefaultEccMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":454
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int* boardId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBoardId(device, boardId)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardId(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_boardId) {
  /* nogil wrapper for cy_nvml.pyx:454 — forwards to the internal
   * _nvml._nvmlDeviceGetBoardId shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBoardId(__pyx_v_device, __pyx_v_boardId);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 455, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetBoardId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":458
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int* multiGpuBool) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMultiGpuBoard(device, multiGpuBool)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMultiGpuBoard(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_multiGpuBool) {
  /* nogil wrapper for cy_nvml.pyx:458 — forwards to the internal
   * _nvml._nvmlDeviceGetMultiGpuBoard shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMultiGpuBoard(__pyx_v_device, __pyx_v_multiGpuBool);
  /* The sentinel is only fatal when a Python exception is also pending
   * (checked under the GIL). */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 459, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMultiGpuBoard", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":462
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long* eccCounts) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetTotalEccErrors(device, errorType, counterType, eccCounts)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEccErrors(nvmlDevice_t __pyx_v_device, nvmlMemoryErrorType_t __pyx_v_errorType, nvmlEccCounterType_t __pyx_v_counterType, unsigned PY_LONG_LONG *__pyx_v_eccCounts) {
  /* nogil wrapper for cy_nvml.pyx:462 — forwards to the internal
   * _nvml._nvmlDeviceGetTotalEccErrors shim and converts its
   * internal-loading-error sentinel into a Python traceback. */
  nvmlReturn_t __pyx_status;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gil_state;

  __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTotalEccErrors(__pyx_v_device, __pyx_v_errorType, __pyx_v_counterType, __pyx_v_eccCounts);
  /* Sentinel plus a pending Python exception (checked under the GIL)
   * marks an internal loading failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 463, __pyx_fail)
  return __pyx_status;

  __pyx_fail:;
  /* Error exit: record the traceback under the GIL, then propagate the
   * sentinel error code. */
  __pyx_gil_state = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetTotalEccErrors", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_PyGILState_Release(__pyx_gil_state);
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":466
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlMemoryLocation_t locationType, unsigned long long* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetMemoryErrorCounter (cy_nvml.pyx:466).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t __pyx_v_device, nvmlMemoryErrorType_t __pyx_v_errorType, nvmlEccCounterType_t __pyx_v_counterType, nvmlMemoryLocation_t __pyx_v_locationType, unsigned PY_LONG_LONG *__pyx_v_count) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":467
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlMemoryLocation_t locationType, unsigned long long* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception (__PYX_ERR records the source position and jumps to the
   * error handler). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryErrorCounter(__pyx_v_device, __pyx_v_errorType, __pyx_v_counterType, __pyx_v_locationType, __pyx_v_count); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 467, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":466
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlMemoryLocation_t locationType, unsigned long long* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryErrorCounter(device, errorType, counterType, locationType, count)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMemoryErrorCounter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":470
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t* utilization) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetUtilizationRates(device, utilization)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetUtilizationRates (cy_nvml.pyx:470).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUtilizationRates(nvmlDevice_t __pyx_v_device, nvmlUtilization_t *__pyx_v_utilization) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":471
 * 
 * cdef nvmlReturn_t nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t* utilization) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetUtilizationRates(device, utilization)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetUtilizationRates(__pyx_v_device, __pyx_v_utilization); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 471, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":470
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t* utilization) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetUtilizationRates(device, utilization)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetUtilizationRates", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":474
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderUtilization(device, utilization, samplingPeriodUs)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetEncoderUtilization (cy_nvml.pyx:474).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderUtilization(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_utilization, unsigned int *__pyx_v_samplingPeriodUs) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":475
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetEncoderUtilization(device, utilization, samplingPeriodUs)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderUtilization(__pyx_v_device, __pyx_v_utilization, __pyx_v_samplingPeriodUs); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 475, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":474
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderUtilization(device, utilization, samplingPeriodUs)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetEncoderUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":478
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderCapacity(nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int* encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderCapacity(device, encoderQueryType, encoderCapacity)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetEncoderCapacity (cy_nvml.pyx:478).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderCapacity(nvmlDevice_t __pyx_v_device, nvmlEncoderType_t __pyx_v_encoderQueryType, unsigned int *__pyx_v_encoderCapacity) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":479
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderCapacity(nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int* encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetEncoderCapacity(device, encoderQueryType, encoderCapacity)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderCapacity(__pyx_v_device, __pyx_v_encoderQueryType, __pyx_v_encoderCapacity); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 479, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":478
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderCapacity(nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int* encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderCapacity(device, encoderQueryType, encoderCapacity)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetEncoderCapacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":482
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderStats(nvmlDevice_t device, unsigned int* sessionCount, unsigned int* averageFps, unsigned int* averageLatency) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderStats(device, sessionCount, averageFps, averageLatency)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetEncoderStats (cy_nvml.pyx:482).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderStats(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_sessionCount, unsigned int *__pyx_v_averageFps, unsigned int *__pyx_v_averageLatency) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":483
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderStats(nvmlDevice_t device, unsigned int* sessionCount, unsigned int* averageFps, unsigned int* averageLatency) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetEncoderStats(device, sessionCount, averageFps, averageLatency)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderStats(__pyx_v_device, __pyx_v_sessionCount, __pyx_v_averageFps, __pyx_v_averageLatency); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 483, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":482
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderStats(nvmlDevice_t device, unsigned int* sessionCount, unsigned int* averageFps, unsigned int* averageLatency) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderStats(device, sessionCount, averageFps, averageLatency)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetEncoderStats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":486
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int* sessionCount, nvmlEncoderSessionInfo_t* sessionInfos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderSessions(device, sessionCount, sessionInfos)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetEncoderSessions (cy_nvml.pyx:486).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderSessions(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_sessionCount, nvmlEncoderSessionInfo_t *__pyx_v_sessionInfos) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":487
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int* sessionCount, nvmlEncoderSessionInfo_t* sessionInfos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetEncoderSessions(device, sessionCount, sessionInfos)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderSessions(__pyx_v_device, __pyx_v_sessionCount, __pyx_v_sessionInfos); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 487, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":486
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int* sessionCount, nvmlEncoderSessionInfo_t* sessionInfos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetEncoderSessions(device, sessionCount, sessionInfos)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetEncoderSessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":490
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDecoderUtilization(device, utilization, samplingPeriodUs)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetDecoderUtilization (cy_nvml.pyx:490).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDecoderUtilization(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_utilization, unsigned int *__pyx_v_samplingPeriodUs) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":491
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetDecoderUtilization(device, utilization, samplingPeriodUs)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDecoderUtilization(__pyx_v_device, __pyx_v_utilization, __pyx_v_samplingPeriodUs); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 491, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":490
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDecoderUtilization(device, utilization, samplingPeriodUs)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDecoderUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":494
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetJpgUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetJpgUtilization(device, utilization, samplingPeriodUs)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetJpgUtilization (cy_nvml.pyx:494).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetJpgUtilization(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_utilization, unsigned int *__pyx_v_samplingPeriodUs) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":495
 * 
 * cdef nvmlReturn_t nvmlDeviceGetJpgUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetJpgUtilization(device, utilization, samplingPeriodUs)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetJpgUtilization(__pyx_v_device, __pyx_v_utilization, __pyx_v_samplingPeriodUs); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 495, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":494
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetJpgUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetJpgUtilization(device, utilization, samplingPeriodUs)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetJpgUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":498
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetOfaUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetOfaUtilization(device, utilization, samplingPeriodUs)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetOfaUtilization (cy_nvml.pyx:498).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetOfaUtilization(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_utilization, unsigned int *__pyx_v_samplingPeriodUs) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":499
 * 
 * cdef nvmlReturn_t nvmlDeviceGetOfaUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetOfaUtilization(device, utilization, samplingPeriodUs)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetOfaUtilization(__pyx_v_device, __pyx_v_utilization, __pyx_v_samplingPeriodUs); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 499, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":498
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetOfaUtilization(nvmlDevice_t device, unsigned int* utilization, unsigned int* samplingPeriodUs) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetOfaUtilization(device, utilization, samplingPeriodUs)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetOfaUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":502
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t* fbcStats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetFBCStats(device, fbcStats)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetFBCStats (cy_nvml.pyx:502).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCStats(nvmlDevice_t __pyx_v_device, nvmlFBCStats_t *__pyx_v_fbcStats) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":503
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t* fbcStats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetFBCStats(device, fbcStats)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFBCStats(__pyx_v_device, __pyx_v_fbcStats); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 503, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":502
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t* fbcStats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetFBCStats(device, fbcStats)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFBCStats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":506
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int* sessionCount, nvmlFBCSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetFBCSessions(device, sessionCount, sessionInfo)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetFBCSessions (cy_nvml.pyx:506).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCSessions(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_sessionCount, nvmlFBCSessionInfo_t *__pyx_v_sessionInfo) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":507
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int* sessionCount, nvmlFBCSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetFBCSessions(device, sessionCount, sessionInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFBCSessions(__pyx_v_device, __pyx_v_sessionCount, __pyx_v_sessionInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 507, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":506
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int* sessionCount, nvmlFBCSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetFBCSessions(device, sessionCount, sessionInfo)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFBCSessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":510
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDriverModel_v2(nvmlDevice_t device, nvmlDriverModel_t* current, nvmlDriverModel_t* pending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDriverModel_v2(device, current, pending)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetDriverModel_v2 (cy_nvml.pyx:510).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDriverModel_v2(nvmlDevice_t __pyx_v_device, nvmlDriverModel_t *__pyx_v_current, nvmlDriverModel_t *__pyx_v_pending) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":511
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDriverModel_v2(nvmlDevice_t device, nvmlDriverModel_t* current, nvmlDriverModel_t* pending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetDriverModel_v2(device, current, pending)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDriverModel_v2(__pyx_v_device, __pyx_v_current, __pyx_v_pending); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 511, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":510
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDriverModel_v2(nvmlDevice_t device, nvmlDriverModel_t* current, nvmlDriverModel_t* pending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDriverModel_v2(device, current, pending)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDriverModel_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":514
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVbiosVersion(device, version, length)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetVbiosVersion (cy_nvml.pyx:514).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVbiosVersion(nvmlDevice_t __pyx_v_device, char *__pyx_v_version, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":515
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVbiosVersion(device, version, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVbiosVersion(__pyx_v_device, __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 515, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":514
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVbiosVersion(device, version, length)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVbiosVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":518
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t* bridgeHierarchy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBridgeChipInfo(device, bridgeHierarchy)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetBridgeChipInfo (cy_nvml.pyx:518).  Forwards to the
 * implementation in cuda.bindings._internal._nvml.  The .pyx signature is
 * "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBridgeChipInfo(nvmlDevice_t __pyx_v_device, nvmlBridgeChipHierarchy_t *__pyx_v_bridgeHierarchy) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":519
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t* bridgeHierarchy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetBridgeChipInfo(device, bridgeHierarchy)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBridgeChipInfo(__pyx_v_device, __pyx_v_bridgeHierarchy); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 519, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":518
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t* bridgeHierarchy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBridgeChipInfo(device, bridgeHierarchy)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetBridgeChipInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":522
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int* infoCount, nvmlProcessInfo_t* infos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetComputeRunningProcesses_v3(device, infoCount, infos)
 * 
*/

/* Cython-generated wrapper for the `cdef ... nogil` function
 * nvmlDeviceGetComputeRunningProcesses_v3 (cy_nvml.pyx:522).  Forwards to
 * the implementation in cuda.bindings._internal._nvml.  The .pyx signature
 * is "except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR", so the sentinel return
 * value signals an error only when a Python exception is also pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_infoCount, nvmlProcessInfo_t *__pyx_v_infos) {
  nvmlReturn_t __pyx_r;  /* wrapper's return value */
  nvmlReturn_t __pyx_t_1;  /* result of the forwarded call */
  int __pyx_lineno = 0;  /* error position, set by __PYX_ERR */
  const char *__pyx_filename = NULL;  /* error position, set by __PYX_ERR */
  int __pyx_clineno = 0;  /* error position, set by __PYX_ERR */
  PyGILState_STATE __pyx_gilstate_save;  /* used only on the error path */

  /* "cuda/bindings/cy_nvml.pyx":523
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int* infoCount, nvmlProcessInfo_t* infos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetComputeRunningProcesses_v3(device, infoCount, infos)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Forward to the internal implementation; treat the sentinel as an
   * error only if __Pyx_ErrOccurredWithGIL() reports a pending Python
   * exception. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeRunningProcesses_v3(__pyx_v_device, __pyx_v_infoCount, __pyx_v_infos); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 523, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":522
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int* infoCount, nvmlProcessInfo_t* infos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetComputeRunningProcesses_v3(device, infoCount, infos)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: acquire the GIL (the function is declared nogil in the
   * .pyx) to record a traceback entry, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetComputeRunningProcesses_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":526
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int* infoCount, nvmlProcessInfo_t* infos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMPSComputeRunningProcesses_v3(device, infoCount, infos)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetMPSComputeRunningProcesses_v3:
 * forwards the call to the internal NVML binding module and returns its status.
 * The `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_infoCount, nvmlProcessInfo_t *__pyx_v_infos) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":527
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int* infoCount, nvmlProcessInfo_t* infos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMPSComputeRunningProcesses_v3(device, infoCount, infos)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMPSComputeRunningProcesses_v3(__pyx_v_device, __pyx_v_infoCount, __pyx_v_infos); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 527, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":526
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int* infoCount, nvmlProcessInfo_t* infos) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMPSComputeRunningProcesses_v3(device, infoCount, infos)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMPSComputeRunningProcesses_v3", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":530
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRunningProcessDetailList(nvmlDevice_t device, nvmlProcessDetailList_t* plist) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRunningProcessDetailList(device, plist)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetRunningProcessDetailList:
 * forwards the call to the internal NVML binding module and returns its status.
 * The `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRunningProcessDetailList(nvmlDevice_t __pyx_v_device, nvmlProcessDetailList_t *__pyx_v_plist) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":531
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRunningProcessDetailList(nvmlDevice_t device, nvmlProcessDetailList_t* plist) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetRunningProcessDetailList(device, plist)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRunningProcessDetailList(__pyx_v_device, __pyx_v_plist); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 531, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":530
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRunningProcessDetailList(nvmlDevice_t device, nvmlProcessDetailList_t* plist) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRunningProcessDetailList(device, plist)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRunningProcessDetailList", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":534
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int* onSameBoard) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceOnSameBoard(device1, device2, onSameBoard)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceOnSameBoard: forwards the call
 * to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceOnSameBoard(nvmlDevice_t __pyx_v_device1, nvmlDevice_t __pyx_v_device2, int *__pyx_v_onSameBoard) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":535
 * 
 * cdef nvmlReturn_t nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int* onSameBoard) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceOnSameBoard(device1, device2, onSameBoard)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceOnSameBoard(__pyx_v_device1, __pyx_v_device2, __pyx_v_onSameBoard); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 535, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":534
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int* onSameBoard) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceOnSameBoard(device1, device2, onSameBoard)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceOnSameBoard", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":538
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t* isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAPIRestriction(device, apiType, isRestricted)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetAPIRestriction: forwards
 * the call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAPIRestriction(nvmlDevice_t __pyx_v_device, nvmlRestrictedAPI_t __pyx_v_apiType, nvmlEnableState_t *__pyx_v_isRestricted) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":539
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t* isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetAPIRestriction(device, apiType, isRestricted)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAPIRestriction(__pyx_v_device, __pyx_v_apiType, __pyx_v_isRestricted); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 539, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":538
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t* isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAPIRestriction(device, apiType, isRestricted)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAPIRestriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":542
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, nvmlValueType_t* sampleValType, unsigned int* sampleCount, nvmlSample_t* samples) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, sampleValType, sampleCount, samples)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetSamples: forwards the call
 * to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSamples(nvmlDevice_t __pyx_v_device, nvmlSamplingType_t __pyx_v_type, unsigned PY_LONG_LONG __pyx_v_lastSeenTimeStamp, nvmlValueType_t *__pyx_v_sampleValType, unsigned int *__pyx_v_sampleCount, nvmlSample_t *__pyx_v_samples) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":543
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, nvmlValueType_t* sampleValType, unsigned int* sampleCount, nvmlSample_t* samples) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, sampleValType, sampleCount, samples)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSamples(__pyx_v_device, __pyx_v_type, __pyx_v_lastSeenTimeStamp, __pyx_v_sampleValType, __pyx_v_sampleCount, __pyx_v_samples); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 543, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":542
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, nvmlValueType_t* sampleValType, unsigned int* sampleCount, nvmlSample_t* samples) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSamples(device, type, lastSeenTimeStamp, sampleValType, sampleCount, samples)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSamples", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":546
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t* bar1Memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBAR1MemoryInfo(device, bar1Memory)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetBAR1MemoryInfo: forwards
 * the call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t __pyx_v_device, nvmlBAR1Memory_t *__pyx_v_bar1Memory) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":547
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t* bar1Memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetBAR1MemoryInfo(device, bar1Memory)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBAR1MemoryInfo(__pyx_v_device, __pyx_v_bar1Memory); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 547, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":546
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t* bar1Memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBAR1MemoryInfo(device, bar1Memory)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetBAR1MemoryInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":550
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int* irqNum) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetIrqNum(device, irqNum)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetIrqNum: forwards the call
 * to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIrqNum(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_irqNum) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":551
 * 
 * cdef nvmlReturn_t nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int* irqNum) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetIrqNum(device, irqNum)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetIrqNum(__pyx_v_device, __pyx_v_irqNum); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 551, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":550
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int* irqNum) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetIrqNum(device, irqNum)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetIrqNum", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":554
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int* numCores) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNumGpuCores(device, numCores)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetNumGpuCores: forwards the
 * call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumGpuCores(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_numCores) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":555
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int* numCores) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNumGpuCores(device, numCores)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumGpuCores(__pyx_v_device, __pyx_v_numCores); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 555, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":554
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int* numCores) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNumGpuCores(device, numCores)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNumGpuCores", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":558
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t* powerSource) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerSource(device, powerSource)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetPowerSource: forwards the
 * call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerSource(nvmlDevice_t __pyx_v_device, nvmlPowerSource_t *__pyx_v_powerSource) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":559
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t* powerSource) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPowerSource(device, powerSource)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerSource(__pyx_v_device, __pyx_v_powerSource); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 559, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":558
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t* powerSource) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerSource(device, powerSource)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerSource", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":562
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int* busWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryBusWidth(device, busWidth)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetMemoryBusWidth: forwards
 * the call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryBusWidth(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_busWidth) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":563
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int* busWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMemoryBusWidth(device, busWidth)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryBusWidth(__pyx_v_device, __pyx_v_busWidth); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 563, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":562
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int* busWidth) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMemoryBusWidth(device, busWidth)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMemoryBusWidth", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":566
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int* maxSpeed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieLinkMaxSpeed(device, maxSpeed)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetPcieLinkMaxSpeed: forwards
 * the call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_maxSpeed) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":567
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int* maxSpeed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPcieLinkMaxSpeed(device, maxSpeed)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieLinkMaxSpeed(__pyx_v_device, __pyx_v_maxSpeed); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 567, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":566
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int* maxSpeed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieLinkMaxSpeed(device, maxSpeed)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPcieLinkMaxSpeed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":570
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int* pcieSpeed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieSpeed(device, pcieSpeed)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetPcieSpeed: forwards the
 * call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieSpeed(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_pcieSpeed) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":571
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int* pcieSpeed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPcieSpeed(device, pcieSpeed)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieSpeed(__pyx_v_device, __pyx_v_pcieSpeed); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 571, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":570
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int* pcieSpeed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPcieSpeed(device, pcieSpeed)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPcieSpeed", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":574
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int* adaptiveClockStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAdaptiveClockInfoStatus(device, adaptiveClockStatus)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetAdaptiveClockInfoStatus:
 * forwards the call to the internal NVML binding module and returns its status.
 * The `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_adaptiveClockStatus) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":575
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int* adaptiveClockStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetAdaptiveClockInfoStatus(device, adaptiveClockStatus)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAdaptiveClockInfoStatus(__pyx_v_device, __pyx_v_adaptiveClockStatus); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 575, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":574
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int* adaptiveClockStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAdaptiveClockInfoStatus(device, adaptiveClockStatus)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAdaptiveClockInfoStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":578
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t* type) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBusType(device, type)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetBusType: forwards the call
 * to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBusType(nvmlDevice_t __pyx_v_device, nvmlBusType_t *__pyx_v_type) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":579
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t* type) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetBusType(device, type)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBusType(__pyx_v_device, __pyx_v_type); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 579, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":578
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t* type) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetBusType(device, type)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetBusType", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":582
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuFabricInfoV(nvmlDevice_t device, nvmlGpuFabricInfoV_t* gpuFabricInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuFabricInfoV(device, gpuFabricInfo)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlDeviceGetGpuFabricInfoV: forwards
 * the call to the internal NVML binding module and returns its status.  The
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuFabricInfoV(nvmlDevice_t __pyx_v_device, nvmlGpuFabricInfoV_t *__pyx_v_gpuFabricInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":583
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuFabricInfoV(nvmlDevice_t device, nvmlGpuFabricInfoV_t* gpuFabricInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGpuFabricInfoV(device, gpuFabricInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuFabricInfoV(__pyx_v_device, __pyx_v_gpuFabricInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 583, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":582
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuFabricInfoV(nvmlDevice_t device, nvmlGpuFabricInfoV_t* gpuFabricInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuFabricInfoV(device, gpuFabricInfo)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuFabricInfoV", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":586
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeCapabilities(nvmlConfComputeSystemCaps_t* capabilities) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeCapabilities(capabilities)
 * 
*/

/* Cython-generated `nogil` wrapper for nvmlSystemGetConfComputeCapabilities:
 * forwards the call to the internal NVML binding module and returns its status.
 * The `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR` contract makes the sentinel
 * return ambiguous: it only signals a Python-level error when an exception is
 * actually pending (checked while holding the GIL). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeCapabilities(nvmlConfComputeSystemCaps_t *__pyx_v_capabilities) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":587
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeCapabilities(nvmlConfComputeSystemCaps_t* capabilities) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetConfComputeCapabilities(capabilities)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Delegate to the internal binding; the sentinel is treated as an error only
   * if a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeCapabilities(__pyx_v_capabilities); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 587, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":586
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeCapabilities(nvmlConfComputeSystemCaps_t* capabilities) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeCapabilities(capabilities)
 * 
 */

  /* function exit code */
  /* Error path (via __PYX_ERR): acquire the GIL so this frame can be added to
   * the pending exception's traceback, then propagate the sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetConfComputeCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":590
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeState(nvmlConfComputeSystemState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeState(state)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeState(nvmlConfComputeSystemState_t *__pyx_v_state) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":591
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeState(nvmlConfComputeSystemState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetConfComputeState(state)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeState(__pyx_v_state); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 591, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":590
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeState(nvmlConfComputeSystemState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeState(state)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetConfComputeState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":594
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice_t device, nvmlConfComputeMemSizeInfo_t* memInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeMemSizeInfo(device, memInfo)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice_t __pyx_v_device, nvmlConfComputeMemSizeInfo_t *__pyx_v_memInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":595
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice_t device, nvmlConfComputeMemSizeInfo_t* memInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetConfComputeMemSizeInfo(device, memInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeMemSizeInfo(__pyx_v_device, __pyx_v_memInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 595, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":594
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeMemSizeInfo(nvmlDevice_t device, nvmlConfComputeMemSizeInfo_t* memInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeMemSizeInfo(device, memInfo)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetConfComputeMemSizeInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":598
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeGpusReadyState(unsigned int* isAcceptingWork) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeGpusReadyState(isAcceptingWork)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeGpusReadyState(unsigned int *__pyx_v_isAcceptingWork) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":599
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeGpusReadyState(unsigned int* isAcceptingWork) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetConfComputeGpusReadyState(isAcceptingWork)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeGpusReadyState(__pyx_v_isAcceptingWork); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 599, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":598
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeGpusReadyState(unsigned int* isAcceptingWork) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeGpusReadyState(isAcceptingWork)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetConfComputeGpusReadyState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":602
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice_t device, nvmlMemory_t* memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeProtectedMemoryUsage(device, memory)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice_t __pyx_v_device, nvmlMemory_t *__pyx_v_memory) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":603
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice_t device, nvmlMemory_t* memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetConfComputeProtectedMemoryUsage(device, memory)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeProtectedMemoryUsage(__pyx_v_device, __pyx_v_memory); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 603, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":602
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeProtectedMemoryUsage(nvmlDevice_t device, nvmlMemory_t* memory) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeProtectedMemoryUsage(device, memory)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetConfComputeProtectedMemoryUsage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":606
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice_t device, nvmlConfComputeGpuCertificate_t* gpuCert) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeGpuCertificate(device, gpuCert)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice_t __pyx_v_device, nvmlConfComputeGpuCertificate_t *__pyx_v_gpuCert) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":607
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice_t device, nvmlConfComputeGpuCertificate_t* gpuCert) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetConfComputeGpuCertificate(device, gpuCert)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeGpuCertificate(__pyx_v_device, __pyx_v_gpuCert); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 607, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":606
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeGpuCertificate(nvmlDevice_t device, nvmlConfComputeGpuCertificate_t* gpuCert) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeGpuCertificate(device, gpuCert)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetConfComputeGpuCertificate", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":610
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice_t device, nvmlConfComputeGpuAttestationReport_t* gpuAtstReport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeGpuAttestationReport(device, gpuAtstReport)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice_t __pyx_v_device, nvmlConfComputeGpuAttestationReport_t *__pyx_v_gpuAtstReport) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":611
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice_t device, nvmlConfComputeGpuAttestationReport_t* gpuAtstReport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetConfComputeGpuAttestationReport(device, gpuAtstReport)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeGpuAttestationReport(__pyx_v_device, __pyx_v_gpuAtstReport); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 611, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":610
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetConfComputeGpuAttestationReport(nvmlDevice_t device, nvmlConfComputeGpuAttestationReport_t* gpuAtstReport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetConfComputeGpuAttestationReport(device, gpuAtstReport)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetConfComputeGpuAttestationReport", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":614
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeKeyRotationThresholdInfo(nvmlConfComputeGetKeyRotationThresholdInfo_t* pKeyRotationThrInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeKeyRotationThresholdInfo(pKeyRotationThrInfo)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeKeyRotationThresholdInfo(nvmlConfComputeGetKeyRotationThresholdInfo_t *__pyx_v_pKeyRotationThrInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":615
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeKeyRotationThresholdInfo(nvmlConfComputeGetKeyRotationThresholdInfo_t* pKeyRotationThrInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetConfComputeKeyRotationThresholdInfo(pKeyRotationThrInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeKeyRotationThresholdInfo(__pyx_v_pKeyRotationThrInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 615, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":614
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeKeyRotationThresholdInfo(nvmlConfComputeGetKeyRotationThresholdInfo_t* pKeyRotationThrInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeKeyRotationThresholdInfo(pKeyRotationThrInfo)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetConfComputeKeyRotationThresholdInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":618
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice_t device, unsigned long long sizeKiB) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetConfComputeUnprotectedMemSize(device, sizeKiB)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_sizeKiB) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":619
 * 
 * cdef nvmlReturn_t nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice_t device, unsigned long long sizeKiB) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetConfComputeUnprotectedMemSize(device, sizeKiB)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetConfComputeUnprotectedMemSize(__pyx_v_device, __pyx_v_sizeKiB); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 619, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":618
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetConfComputeUnprotectedMemSize(nvmlDevice_t device, unsigned long long sizeKiB) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetConfComputeUnprotectedMemSize(device, sizeKiB)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetConfComputeUnprotectedMemSize", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":622
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemSetConfComputeGpusReadyState(unsigned int isAcceptingWork) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemSetConfComputeGpusReadyState(isAcceptingWork)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeGpusReadyState(unsigned int __pyx_v_isAcceptingWork) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":623
 * 
 * cdef nvmlReturn_t nvmlSystemSetConfComputeGpusReadyState(unsigned int isAcceptingWork) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemSetConfComputeGpusReadyState(isAcceptingWork)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetConfComputeGpusReadyState(__pyx_v_isAcceptingWork); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 623, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":622
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemSetConfComputeGpusReadyState(unsigned int isAcceptingWork) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemSetConfComputeGpusReadyState(isAcceptingWork)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemSetConfComputeGpusReadyState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":626
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemSetConfComputeKeyRotationThresholdInfo(nvmlConfComputeSetKeyRotationThresholdInfo_t* pKeyRotationThrInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemSetConfComputeKeyRotationThresholdInfo(pKeyRotationThrInfo)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeKeyRotationThresholdInfo(nvmlConfComputeSetKeyRotationThresholdInfo_t *__pyx_v_pKeyRotationThrInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":627
 * 
 * cdef nvmlReturn_t nvmlSystemSetConfComputeKeyRotationThresholdInfo(nvmlConfComputeSetKeyRotationThresholdInfo_t* pKeyRotationThrInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemSetConfComputeKeyRotationThresholdInfo(pKeyRotationThrInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetConfComputeKeyRotationThresholdInfo(__pyx_v_pKeyRotationThrInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 627, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":626
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemSetConfComputeKeyRotationThresholdInfo(nvmlConfComputeSetKeyRotationThresholdInfo_t* pKeyRotationThrInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemSetConfComputeKeyRotationThresholdInfo(pKeyRotationThrInfo)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemSetConfComputeKeyRotationThresholdInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":630
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeSettings(nvmlSystemConfComputeSettings_t* settings) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeSettings(settings)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeSettings(nvmlSystemConfComputeSettings_t *__pyx_v_settings) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":631
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeSettings(nvmlSystemConfComputeSettings_t* settings) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetConfComputeSettings(settings)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeSettings(__pyx_v_settings); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 631, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":630
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetConfComputeSettings(nvmlSystemConfComputeSettings_t* settings) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetConfComputeSettings(settings)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetConfComputeSettings", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":634
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char* version) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGspFirmwareVersion(device, version)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t __pyx_v_device, char *__pyx_v_version) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":635
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char* version) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGspFirmwareVersion(device, version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGspFirmwareVersion(__pyx_v_device, __pyx_v_version); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 635, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":634
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char* version) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGspFirmwareVersion(device, version)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGspFirmwareVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":638
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int* isEnabled, unsigned int* defaultMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGspFirmwareMode(device, isEnabled, defaultMode)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareMode(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_isEnabled, unsigned int *__pyx_v_defaultMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":639
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int* isEnabled, unsigned int* defaultMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGspFirmwareMode(device, isEnabled, defaultMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGspFirmwareMode(__pyx_v_device, __pyx_v_isEnabled, __pyx_v_defaultMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 639, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":638
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int* isEnabled, unsigned int* defaultMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGspFirmwareMode(device, isEnabled, defaultMode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGspFirmwareMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":642
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSramEccErrorStatus(nvmlDevice_t device, nvmlEccSramErrorStatus_t* status) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSramEccErrorStatus(device, status)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramEccErrorStatus(nvmlDevice_t __pyx_v_device, nvmlEccSramErrorStatus_t *__pyx_v_status) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":643
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSramEccErrorStatus(nvmlDevice_t device, nvmlEccSramErrorStatus_t* status) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetSramEccErrorStatus(device, status)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSramEccErrorStatus(__pyx_v_device, __pyx_v_status); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 643, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":642
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSramEccErrorStatus(nvmlDevice_t device, nvmlEccSramErrorStatus_t* status) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSramEccErrorStatus(device, status)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSramEccErrorStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":646
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingMode(device, mode)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_mode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":647
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetAccountingMode(device, mode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingMode(__pyx_v_device, __pyx_v_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 647, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":646
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingMode(device, mode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAccountingMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":650
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t* stats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingStats(device, pid, stats)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingStats(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_pid, nvmlAccountingStats_t *__pyx_v_stats) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":651
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t* stats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetAccountingStats(device, pid, stats)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingStats(__pyx_v_device, __pyx_v_pid, __pyx_v_stats); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 651, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":650
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t* stats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingStats(device, pid, stats)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAccountingStats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":654
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int* count, unsigned int* pids) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingPids(device, count, pids)
 * 
 */

/* Cython-generated `nogil` wrapper (regenerate from cuda/bindings/cy_nvml.pyx
 * rather than hand-editing). Forwards to the internal _nvml dispatcher; the
 * error path runs only when the loading-error sentinel is returned with a
 * Python exception pending, and briefly takes the GIL for the traceback. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingPids(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_count, unsigned int *__pyx_v_pids) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":655
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int* count, unsigned int* pids) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetAccountingPids(device, count, pids)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingPids(__pyx_v_device, __pyx_v_count, __pyx_v_pids); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 655, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":654
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int* count, unsigned int* pids) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingPids(device, count, pids)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAccountingPids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":658
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingBufferSize(device, bufferSize)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetAccountingBufferSize to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingBufferSize(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_bufferSize) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":659
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetAccountingBufferSize(device, bufferSize)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingBufferSize(__pyx_v_device, __pyx_v_bufferSize); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 659, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":658
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAccountingBufferSize(device, bufferSize)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAccountingBufferSize", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":662
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, unsigned int* pageCount, unsigned long long* addresses) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRetiredPages(device, cause, pageCount, addresses)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetRetiredPages to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages(nvmlDevice_t __pyx_v_device, nvmlPageRetirementCause_t __pyx_v_cause, unsigned int *__pyx_v_pageCount, unsigned PY_LONG_LONG *__pyx_v_addresses) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":663
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, unsigned int* pageCount, unsigned long long* addresses) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetRetiredPages(device, cause, pageCount, addresses)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPages(__pyx_v_device, __pyx_v_cause, __pyx_v_pageCount, __pyx_v_addresses); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 663, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":662
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, unsigned int* pageCount, unsigned long long* addresses) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRetiredPages(device, cause, pageCount, addresses)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRetiredPages", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":666
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageRetirementCause_t cause, unsigned int* pageCount, unsigned long long* addresses, unsigned long long* timestamps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRetiredPages_v2(device, cause, pageCount, addresses, timestamps)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetRetiredPages_v2 to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages_v2(nvmlDevice_t __pyx_v_device, nvmlPageRetirementCause_t __pyx_v_cause, unsigned int *__pyx_v_pageCount, unsigned PY_LONG_LONG *__pyx_v_addresses, unsigned PY_LONG_LONG *__pyx_v_timestamps) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":667
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageRetirementCause_t cause, unsigned int* pageCount, unsigned long long* addresses, unsigned long long* timestamps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetRetiredPages_v2(device, cause, pageCount, addresses, timestamps)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPages_v2(__pyx_v_device, __pyx_v_cause, __pyx_v_pageCount, __pyx_v_addresses, __pyx_v_timestamps); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 667, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":666
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageRetirementCause_t cause, unsigned int* pageCount, unsigned long long* addresses, unsigned long long* timestamps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRetiredPages_v2(device, cause, pageCount, addresses, timestamps)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRetiredPages_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":670
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t* isPending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRetiredPagesPendingStatus(device, isPending)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetRetiredPagesPendingStatus to the lazily loaded implementation
 * in cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t __pyx_v_device, nvmlEnableState_t *__pyx_v_isPending) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":671
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t* isPending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetRetiredPagesPendingStatus(device, isPending)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPagesPendingStatus(__pyx_v_device, __pyx_v_isPending); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 671, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":670
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t* isPending) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRetiredPagesPendingStatus(device, isPending)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRetiredPagesPendingStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":674
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int* corrRows, unsigned int* uncRows, unsigned int* isPending, unsigned int* failureOccurred) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRemappedRows(device, corrRows, uncRows, isPending, failureOccurred)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetRemappedRows to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRemappedRows(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_corrRows, unsigned int *__pyx_v_uncRows, unsigned int *__pyx_v_isPending, unsigned int *__pyx_v_failureOccurred) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":675
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int* corrRows, unsigned int* uncRows, unsigned int* isPending, unsigned int* failureOccurred) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetRemappedRows(device, corrRows, uncRows, isPending, failureOccurred)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRemappedRows(__pyx_v_device, __pyx_v_corrRows, __pyx_v_uncRows, __pyx_v_isPending, __pyx_v_failureOccurred); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 675, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":674
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRemappedRows(nvmlDevice_t device, unsigned int* corrRows, unsigned int* uncRows, unsigned int* isPending, unsigned int* failureOccurred) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRemappedRows(device, corrRows, uncRows, isPending, failureOccurred)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRemappedRows", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":678
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t* values) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRowRemapperHistogram(device, values)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetRowRemapperHistogram to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t __pyx_v_device, nvmlRowRemapperHistogramValues_t *__pyx_v_values) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":679
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t* values) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetRowRemapperHistogram(device, values)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRowRemapperHistogram(__pyx_v_device, __pyx_v_values); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 679, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":678
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRowRemapperHistogram(nvmlDevice_t device, nvmlRowRemapperHistogramValues_t* values) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRowRemapperHistogram(device, values)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRowRemapperHistogram", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":682
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t* arch) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetArchitecture(device, arch)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetArchitecture to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetArchitecture(nvmlDevice_t __pyx_v_device, nvmlDeviceArchitecture_t *__pyx_v_arch) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":683
 * 
 * cdef nvmlReturn_t nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t* arch) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetArchitecture(device, arch)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetArchitecture(__pyx_v_device, __pyx_v_arch); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 683, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":682
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetArchitecture(nvmlDevice_t device, nvmlDeviceArchitecture_t* arch) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetArchitecture(device, arch)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetArchitecture", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":686
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t* status) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetClkMonStatus(device, status)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetClkMonStatus to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClkMonStatus(nvmlDevice_t __pyx_v_device, nvmlClkMonStatus_t *__pyx_v_status) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":687
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t* status) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetClkMonStatus(device, status)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClkMonStatus(__pyx_v_device, __pyx_v_status); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 687, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":686
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t* status) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetClkMonStatus(device, status)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetClkMonStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":690
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t* utilization, unsigned int* processSamplesCount, unsigned long long lastSeenTimeStamp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetProcessUtilization(device, utilization, processSamplesCount, lastSeenTimeStamp)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetProcessUtilization to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessUtilization(nvmlDevice_t __pyx_v_device, nvmlProcessUtilizationSample_t *__pyx_v_utilization, unsigned int *__pyx_v_processSamplesCount, unsigned PY_LONG_LONG __pyx_v_lastSeenTimeStamp) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":691
 * 
 * cdef nvmlReturn_t nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t* utilization, unsigned int* processSamplesCount, unsigned long long lastSeenTimeStamp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetProcessUtilization(device, utilization, processSamplesCount, lastSeenTimeStamp)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetProcessUtilization(__pyx_v_device, __pyx_v_utilization, __pyx_v_processSamplesCount, __pyx_v_lastSeenTimeStamp); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 691, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":690
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t* utilization, unsigned int* processSamplesCount, unsigned long long lastSeenTimeStamp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetProcessUtilization(device, utilization, processSamplesCount, lastSeenTimeStamp)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetProcessUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":694
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice_t device, nvmlProcessesUtilizationInfo_t* procesesUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetProcessesUtilizationInfo(device, procesesUtilInfo)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetProcessesUtilizationInfo to the lazily loaded implementation
 * in cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path.
 * NOTE(review): the "procesesUtilInfo" spelling comes from the generated
 * .pyx; it is cosmetic only — fix upstream in the generator input if
 * desired, not in this generated file. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice_t __pyx_v_device, nvmlProcessesUtilizationInfo_t *__pyx_v_procesesUtilInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":695
 * 
 * cdef nvmlReturn_t nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice_t device, nvmlProcessesUtilizationInfo_t* procesesUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetProcessesUtilizationInfo(device, procesesUtilInfo)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetProcessesUtilizationInfo(__pyx_v_device, __pyx_v_procesesUtilInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 695, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":694
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetProcessesUtilizationInfo(nvmlDevice_t device, nvmlProcessesUtilizationInfo_t* procesesUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetProcessesUtilizationInfo(device, procesesUtilInfo)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetProcessesUtilizationInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":698
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPlatformInfo(nvmlDevice_t device, nvmlPlatformInfo_t* platformInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPlatformInfo(device, platformInfo)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceGetPlatformInfo to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPlatformInfo(nvmlDevice_t __pyx_v_device, nvmlPlatformInfo_t *__pyx_v_platformInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":699
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPlatformInfo(nvmlDevice_t device, nvmlPlatformInfo_t* platformInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPlatformInfo(device, platformInfo)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPlatformInfo(__pyx_v_device, __pyx_v_platformInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 699, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":698
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPlatformInfo(nvmlDevice_t device, nvmlPlatformInfo_t* platformInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPlatformInfo(device, platformInfo)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPlatformInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":702
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitSetLedState(unit, color)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlUnitSetLedState to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitSetLedState(nvmlUnit_t __pyx_v_unit, nvmlLedColor_t __pyx_v_color) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":703
 * 
 * cdef nvmlReturn_t nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlUnitSetLedState(unit, color)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitSetLedState(__pyx_v_unit, __pyx_v_color); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 703, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":702
 * 
 * 
 * cdef nvmlReturn_t nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlUnitSetLedState(unit, color)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlUnitSetLedState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":706
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPersistenceMode(device, mode)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceSetPersistenceMode to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPersistenceMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t __pyx_v_mode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":707
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetPersistenceMode(device, mode)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPersistenceMode(__pyx_v_device, __pyx_v_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 707, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":706
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPersistenceMode(device, mode)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetPersistenceMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":710
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetComputeMode(device, mode)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceSetComputeMode to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetComputeMode(nvmlDevice_t __pyx_v_device, nvmlComputeMode_t __pyx_v_mode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":711
 * 
 * cdef nvmlReturn_t nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetComputeMode(device, mode)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetComputeMode(__pyx_v_device, __pyx_v_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 711, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":710
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetComputeMode(device, mode)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetComputeMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":714
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetEccMode(device, ecc)
 * 
*/

/* Generated Cython wrapper — do not edit by hand. Forwards
 * nvmlDeviceSetEccMode to the lazily loaded implementation in
 * cuda.bindings._internal._nvml; declared `nogil`, so the GIL is acquired
 * only on the error path. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetEccMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t __pyx_v_ecc) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":715
 * 
 * cdef nvmlReturn_t nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetEccMode(device, ecc)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel return counts as an error only when a Python exception is
   * actually pending; __Pyx_ErrOccurredWithGIL takes the GIL for that check. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetEccMode(__pyx_v_device, __pyx_v_ecc); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 715, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":714
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetEccMode(device, ecc)
 * 
*/

  /* function exit code */
  /* Error path via __PYX_ERR: take the GIL to record the traceback, then
   * fall through; the success path jumps directly to __pyx_L0. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetEccMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":718
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearEccErrorCounts(device, counterType)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearEccErrorCounts(nvmlDevice_t __pyx_v_device, nvmlEccCounterType_t __pyx_v_counterType) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":719
 * 
 * cdef nvmlReturn_t nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceClearEccErrorCounts(device, counterType)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearEccErrorCounts(__pyx_v_device, __pyx_v_counterType); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 719, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":718
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearEccErrorCounts(device, counterType)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceClearEccErrorCounts", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":722
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDriverModel(device, driverModel, flags)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDriverModel(nvmlDevice_t __pyx_v_device, nvmlDriverModel_t __pyx_v_driverModel, unsigned int __pyx_v_flags) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":723
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetDriverModel(device, driverModel, flags)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDriverModel(__pyx_v_device, __pyx_v_driverModel, __pyx_v_flags); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 723, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":722
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDriverModel(device, driverModel, flags)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetDriverModel", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":726
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned int minGpuClockMHz, unsigned int maxGpuClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetGpuLockedClocks(device, minGpuClockMHz, maxGpuClockMHz)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuLockedClocks(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_minGpuClockMHz, unsigned int __pyx_v_maxGpuClockMHz) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":727
 * 
 * cdef nvmlReturn_t nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned int minGpuClockMHz, unsigned int maxGpuClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetGpuLockedClocks(device, minGpuClockMHz, maxGpuClockMHz)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetGpuLockedClocks(__pyx_v_device, __pyx_v_minGpuClockMHz, __pyx_v_maxGpuClockMHz); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 727, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":726
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned int minGpuClockMHz, unsigned int maxGpuClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetGpuLockedClocks(device, minGpuClockMHz, maxGpuClockMHz)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetGpuLockedClocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":730
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceResetGpuLockedClocks(device)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetGpuLockedClocks(nvmlDevice_t __pyx_v_device) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":731
 * 
 * cdef nvmlReturn_t nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceResetGpuLockedClocks(device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetGpuLockedClocks(__pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 731, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":730
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceResetGpuLockedClocks(device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceResetGpuLockedClocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":734
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t device, unsigned int minMemClockMHz, unsigned int maxMemClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetMemoryLockedClocks(device, minMemClockMHz, maxMemClockMHz)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_minMemClockMHz, unsigned int __pyx_v_maxMemClockMHz) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":735
 * 
 * cdef nvmlReturn_t nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t device, unsigned int minMemClockMHz, unsigned int maxMemClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetMemoryLockedClocks(device, minMemClockMHz, maxMemClockMHz)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetMemoryLockedClocks(__pyx_v_device, __pyx_v_minMemClockMHz, __pyx_v_maxMemClockMHz); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 735, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":734
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t device, unsigned int minMemClockMHz, unsigned int maxMemClockMHz) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetMemoryLockedClocks(device, minMemClockMHz, maxMemClockMHz)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetMemoryLockedClocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":738
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceResetMemoryLockedClocks(device)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t __pyx_v_device) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":739
 * 
 * cdef nvmlReturn_t nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceResetMemoryLockedClocks(device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetMemoryLockedClocks(__pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 739, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":738
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceResetMemoryLockedClocks(device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceResetMemoryLockedClocks", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":742
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetAutoBoostedClocksEnabled(device, enabled)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t __pyx_v_device, nvmlEnableState_t __pyx_v_enabled) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":743
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetAutoBoostedClocksEnabled(device, enabled)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAutoBoostedClocksEnabled(__pyx_v_device, __pyx_v_enabled); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 743, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":742
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetAutoBoostedClocksEnabled(device, enabled)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetAutoBoostedClocksEnabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":746
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDefaultAutoBoostedClocksEnabled(device, enabled, flags)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t __pyx_v_device, nvmlEnableState_t __pyx_v_enabled, unsigned int __pyx_v_flags) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":747
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetDefaultAutoBoostedClocksEnabled(device, enabled, flags)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDefaultAutoBoostedClocksEnabled(__pyx_v_device, __pyx_v_enabled, __pyx_v_flags); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 747, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":746
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDefaultAutoBoostedClocksEnabled(device, enabled, flags)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetDefaultAutoBoostedClocksEnabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":750
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDefaultFanSpeed_v2(device, fan)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_fan) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":751
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetDefaultFanSpeed_v2(device, fan)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDefaultFanSpeed_v2(__pyx_v_device, __pyx_v_fan); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 751, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":750
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetDefaultFanSpeed_v2(device, fan)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetDefaultFanSpeed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":754
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, nvmlFanControlPolicy_t policy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetFanControlPolicy(device, fan, policy)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanControlPolicy(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_fan, nvmlFanControlPolicy_t __pyx_v_policy) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":755
 * 
 * cdef nvmlReturn_t nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, nvmlFanControlPolicy_t policy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetFanControlPolicy(device, fan, policy)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetFanControlPolicy(__pyx_v_device, __pyx_v_fan, __pyx_v_policy); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 755, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":754
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, nvmlFanControlPolicy_t policy) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetFanControlPolicy(device, fan, policy)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetFanControlPolicy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":758
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int* temp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetTemperatureThreshold(device, thresholdType, temp)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. Note `temp` is an in/out pointer
 * passed straight through to the internal binding. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetTemperatureThreshold(nvmlDevice_t __pyx_v_device, nvmlTemperatureThresholds_t __pyx_v_thresholdType, int *__pyx_v_temp) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":759
 * 
 * cdef nvmlReturn_t nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int* temp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetTemperatureThreshold(device, thresholdType, temp)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetTemperatureThreshold(__pyx_v_device, __pyx_v_thresholdType, __pyx_v_temp); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 759, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":758
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int* temp) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetTemperatureThreshold(device, thresholdType, temp)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetTemperatureThreshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":762
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPowerManagementLimit(device, limit)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_limit) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":763
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetPowerManagementLimit(device, limit)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerManagementLimit(__pyx_v_device, __pyx_v_limit); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 763, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":762
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPowerManagementLimit(device, limit)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetPowerManagementLimit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":766
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetGpuOperationMode(device, mode)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuOperationMode(nvmlDevice_t __pyx_v_device, nvmlGpuOperationMode_t __pyx_v_mode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":767
 * 
 * cdef nvmlReturn_t nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetGpuOperationMode(device, mode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetGpuOperationMode(__pyx_v_device, __pyx_v_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 767, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":766
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetGpuOperationMode(device, mode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetGpuOperationMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":770
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetAPIRestriction(device, apiType, isRestricted)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAPIRestriction(nvmlDevice_t __pyx_v_device, nvmlRestrictedAPI_t __pyx_v_apiType, nvmlEnableState_t __pyx_v_isRestricted) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":771
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetAPIRestriction(device, apiType, isRestricted)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAPIRestriction(__pyx_v_device, __pyx_v_apiType, __pyx_v_isRestricted); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 771, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":770
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetAPIRestriction(device, apiType, isRestricted)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetAPIRestriction", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":774
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetFanSpeed_v2(device, fan, speed)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanSpeed_v2(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_fan, unsigned int __pyx_v_speed) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":775
 * 
 * cdef nvmlReturn_t nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetFanSpeed_v2(device, fan, speed)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetFanSpeed_v2(__pyx_v_device, __pyx_v_fan, __pyx_v_speed); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 775, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":774
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetFanSpeed_v2(device, fan, speed)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetFanSpeed_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":778
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetAccountingMode(device, mode)
 * 
*/

/* NOTE(review): Cython-generated `nogil` wrapper; do not edit by hand.
 * Forwards to the lazily loaded internal NVML binding. If the call returns
 * the _NVMLRETURN_T_INTERNAL_LOADING_ERROR sentinel while a Python
 * exception is pending, __PYX_ERR records the source location and jumps to
 * __pyx_L1_error, which briefly re-acquires the GIL to append a traceback
 * entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAccountingMode(nvmlDevice_t __pyx_v_device, nvmlEnableState_t __pyx_v_mode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":779
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetAccountingMode(device, mode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAccountingMode(__pyx_v_device, __pyx_v_mode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 779, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":778
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetAccountingMode(device, mode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;  /* reached only via __PYX_ERR above */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetAccountingMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":782
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearAccountingPids(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearAccountingPids(device)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearAccountingPids(nvmlDevice_t __pyx_v_device) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":783
 * 
 * cdef nvmlReturn_t nvmlDeviceClearAccountingPids(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceClearAccountingPids(device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearAccountingPids(__pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 783, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":782
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearAccountingPids(nvmlDevice_t device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearAccountingPids(device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceClearAccountingPids", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":786
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice_t device, nvmlPowerValue_v2_t* powerValue) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPowerManagementLimit_v2(device, powerValue)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice_t __pyx_v_device, nvmlPowerValue_v2_t *__pyx_v_powerValue) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":787
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice_t device, nvmlPowerValue_v2_t* powerValue) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetPowerManagementLimit_v2(device, powerValue)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerManagementLimit_v2(__pyx_v_device, __pyx_v_powerValue); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 787, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":786
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerManagementLimit_v2(nvmlDevice_t device, nvmlPowerValue_v2_t* powerValue) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPowerManagementLimit_v2(device, powerValue)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetPowerManagementLimit_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":790
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t* isActive) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkState(device, link, isActive)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkState(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link, nvmlEnableState_t *__pyx_v_isActive) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":791
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t* isActive) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvLinkState(device, link, isActive)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkState(__pyx_v_device, __pyx_v_link, __pyx_v_isActive); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 791, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":790
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t* isActive) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkState(device, link, isActive)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":794
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int* version) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkVersion(device, link, version)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkVersion(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link, unsigned int *__pyx_v_version) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":795
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int* version) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvLinkVersion(device, link, version)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkVersion(__pyx_v_device, __pyx_v_link, __pyx_v_version); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 795, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":794
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int* version) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkVersion(device, link, version)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":798
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, nvmlNvLinkCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkCapability(device, link, capability, capResult)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkCapability(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link, nvmlNvLinkCapability_t __pyx_v_capability, unsigned int *__pyx_v_capResult) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":799
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, nvmlNvLinkCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvLinkCapability(device, link, capability, capResult)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkCapability(__pyx_v_device, __pyx_v_link, __pyx_v_capability, __pyx_v_capResult); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 799, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":798
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, nvmlNvLinkCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkCapability(device, link, capability, capResult)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkCapability", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":802
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkRemotePciInfo_v2(device, link, pci)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link, nvmlPciInfo_t *__pyx_v_pci) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":803
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvLinkRemotePciInfo_v2(device, link, pci)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkRemotePciInfo_v2(__pyx_v_device, __pyx_v_link, __pyx_v_pci); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 803, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":802
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkRemotePciInfo_v2(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t* pci) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkRemotePciInfo_v2(device, link, pci)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkRemotePciInfo_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":806
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, nvmlNvLinkErrorCounter_t counter, unsigned long long* counterValue) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel.  PY_LONG_LONG is Cython's spelling of the pyx-level
 * `unsigned long long`. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link, nvmlNvLinkErrorCounter_t __pyx_v_counter, unsigned PY_LONG_LONG *__pyx_v_counterValue) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":807
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, nvmlNvLinkErrorCounter_t counter, unsigned long long* counterValue) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkErrorCounter(__pyx_v_device, __pyx_v_link, __pyx_v_counter, __pyx_v_counterValue); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 807, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":806
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, nvmlNvLinkErrorCounter_t counter, unsigned long long* counterValue) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkErrorCounter(device, link, counter, counterValue)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkErrorCounter", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":810
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceResetNvLinkErrorCounters(device, link)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":811
 * 
 * cdef nvmlReturn_t nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceResetNvLinkErrorCounters(device, link)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetNvLinkErrorCounters(__pyx_v_device, __pyx_v_link); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 811, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":810
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceResetNvLinkErrorCounters(device, link)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceResetNvLinkErrorCounters", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":814
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned int link, nvmlIntNvLinkDeviceType_t* pNvLinkDeviceType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkRemoteDeviceType(device, link, pNvLinkDeviceType)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_link, nvmlIntNvLinkDeviceType_t *__pyx_v_pNvLinkDeviceType) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":815
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned int link, nvmlIntNvLinkDeviceType_t* pNvLinkDeviceType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvLinkRemoteDeviceType(device, link, pNvLinkDeviceType)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkRemoteDeviceType(__pyx_v_device, __pyx_v_link, __pyx_v_pNvLinkDeviceType); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 815, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":814
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned int link, nvmlIntNvLinkDeviceType_t* pNvLinkDeviceType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkRemoteDeviceType(device, link, pNvLinkDeviceType)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkRemoteDeviceType", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":818
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t __pyx_v_device, nvmlNvLinkPowerThres_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":819
 * 
 * cdef nvmlReturn_t nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetNvLinkDeviceLowPowerThreshold(__pyx_v_device, __pyx_v_info); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 819, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":818
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, info)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetNvLinkDeviceLowPowerThreshold", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":822
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemSetNvlinkBwMode(unsigned int nvlinkBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemSetNvlinkBwMode(nvlinkBwMode)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetNvlinkBwMode(unsigned int __pyx_v_nvlinkBwMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":823
 * 
 * cdef nvmlReturn_t nvmlSystemSetNvlinkBwMode(unsigned int nvlinkBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemSetNvlinkBwMode(nvlinkBwMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetNvlinkBwMode(__pyx_v_nvlinkBwMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 823, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":822
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemSetNvlinkBwMode(unsigned int nvlinkBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemSetNvlinkBwMode(nvlinkBwMode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemSetNvlinkBwMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":826
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetNvlinkBwMode(unsigned int* nvlinkBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetNvlinkBwMode(nvlinkBwMode)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNvlinkBwMode(unsigned int *__pyx_v_nvlinkBwMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":827
 * 
 * cdef nvmlReturn_t nvmlSystemGetNvlinkBwMode(unsigned int* nvlinkBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlSystemGetNvlinkBwMode(nvlinkBwMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetNvlinkBwMode(__pyx_v_nvlinkBwMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 827, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":826
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemGetNvlinkBwMode(unsigned int* nvlinkBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemGetNvlinkBwMode(nvlinkBwMode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemGetNvlinkBwMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":830
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvlinkSupportedBwModes(nvmlDevice_t device, nvmlNvlinkSupportedBwModes_t* supportedBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvlinkSupportedBwModes(device, supportedBwMode)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkSupportedBwModes(nvmlDevice_t __pyx_v_device, nvmlNvlinkSupportedBwModes_t *__pyx_v_supportedBwMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":831
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvlinkSupportedBwModes(nvmlDevice_t device, nvmlNvlinkSupportedBwModes_t* supportedBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvlinkSupportedBwModes(device, supportedBwMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvlinkSupportedBwModes(__pyx_v_device, __pyx_v_supportedBwMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 831, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":830
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvlinkSupportedBwModes(nvmlDevice_t device, nvmlNvlinkSupportedBwModes_t* supportedBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvlinkSupportedBwModes(device, supportedBwMode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvlinkSupportedBwModes", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":834
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvlinkBwMode(nvmlDevice_t device, nvmlNvlinkGetBwMode_t* getBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvlinkBwMode(device, getBwMode)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkBwMode(nvmlDevice_t __pyx_v_device, nvmlNvlinkGetBwMode_t *__pyx_v_getBwMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":835
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvlinkBwMode(nvmlDevice_t device, nvmlNvlinkGetBwMode_t* getBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetNvlinkBwMode(device, getBwMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvlinkBwMode(__pyx_v_device, __pyx_v_getBwMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 835, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":834
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvlinkBwMode(nvmlDevice_t device, nvmlNvlinkGetBwMode_t* getBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvlinkBwMode(device, getBwMode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvlinkBwMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":838
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetNvlinkBwMode(nvmlDevice_t device, nvmlNvlinkSetBwMode_t* setBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetNvlinkBwMode(device, setBwMode)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvlinkBwMode(nvmlDevice_t __pyx_v_device, nvmlNvlinkSetBwMode_t *__pyx_v_setBwMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":839
 * 
 * cdef nvmlReturn_t nvmlDeviceSetNvlinkBwMode(nvmlDevice_t device, nvmlNvlinkSetBwMode_t* setBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetNvlinkBwMode(device, setBwMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetNvlinkBwMode(__pyx_v_device, __pyx_v_setBwMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 839, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":838
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetNvlinkBwMode(nvmlDevice_t device, nvmlNvlinkSetBwMode_t* setBwMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetNvlinkBwMode(device, setBwMode)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetNvlinkBwMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":842
 * 
 * 
 * cdef nvmlReturn_t nvmlEventSetCreate(nvmlEventSet_t* set) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlEventSetCreate(set)
 * 
*/

/* Cython-generated `nogil` shim: forwards to the dynamically resolved NVML
 * entry point in cuda.bindings._internal._nvml; on the sentinel error value
 * with a pending Python exception it records a traceback (under the GIL) and
 * returns the sentinel.  `set` here is the pyx parameter name, not the C++
 * container. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetCreate(nvmlEventSet_t *__pyx_v_set) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":843
 * 
 * cdef nvmlReturn_t nvmlEventSetCreate(nvmlEventSet_t* set) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlEventSetCreate(set)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetCreate(__pyx_v_set); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 843, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":842
 * 
 * 
 * cdef nvmlReturn_t nvmlEventSetCreate(nvmlEventSet_t* set) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlEventSetCreate(set)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: GIL must be held before AddTraceback (nogil function). */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlEventSetCreate", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":846
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceRegisterEvents(device, eventTypes, set)
 * 
*/

/* nogil bridge for nvmlDeviceRegisterEvents: forwards to the internal _nvml
 * binding and converts the sentinel-plus-pending-exception failure into a
 * traceback entry before returning the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRegisterEvents(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_eventTypes, nvmlEventSet_t __pyx_v_set) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceRegisterEvents(__pyx_v_device, __pyx_v_eventTypes, __pyx_v_set);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 847, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceRegisterEvents", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":850
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long* eventTypes) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedEventTypes(device, eventTypes)
 * 
*/

/* nogil bridge for nvmlDeviceGetSupportedEventTypes: forwards to the internal
 * _nvml binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedEventTypes(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG *__pyx_v_eventTypes) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedEventTypes(__pyx_v_device, __pyx_v_eventTypes);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 851, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSupportedEventTypes", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":854
 * 
 * 
 * cdef nvmlReturn_t nvmlEventSetWait_v2(nvmlEventSet_t set, nvmlEventData_t* data, unsigned int timeoutms) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlEventSetWait_v2(set, data, timeoutms)
 * 
*/

/* nogil bridge for nvmlEventSetWait_v2: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetWait_v2(nvmlEventSet_t __pyx_v_set, nvmlEventData_t *__pyx_v_data, unsigned int __pyx_v_timeoutms) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetWait_v2(__pyx_v_set, __pyx_v_data, __pyx_v_timeoutms);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 855, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlEventSetWait_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":858
 * 
 * 
 * cdef nvmlReturn_t nvmlEventSetFree(nvmlEventSet_t set) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlEventSetFree(set)
 * 
*/

/* nogil bridge for nvmlEventSetFree: forwards to the internal _nvml binding;
 * on the sentinel error return with a pending Python exception, records a
 * traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetFree(nvmlEventSet_t __pyx_v_set) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetFree(__pyx_v_set);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 859, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlEventSetFree", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":862
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemEventSetCreate(nvmlSystemEventSetCreateRequest_t* request) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemEventSetCreate(request)
 * 
*/

/* nogil bridge for nvmlSystemEventSetCreate: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetCreate(nvmlSystemEventSetCreateRequest_t *__pyx_v_request) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetCreate(__pyx_v_request);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 863, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemEventSetCreate", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":866
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemEventSetFree(nvmlSystemEventSetFreeRequest_t* request) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemEventSetFree(request)
 * 
*/

/* nogil bridge for nvmlSystemEventSetFree: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetFree(nvmlSystemEventSetFreeRequest_t *__pyx_v_request) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetFree(__pyx_v_request);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 867, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemEventSetFree", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":870
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemRegisterEvents(nvmlSystemRegisterEventRequest_t* request) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemRegisterEvents(request)
 * 
*/

/* nogil bridge for nvmlSystemRegisterEvents: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemRegisterEvents(nvmlSystemRegisterEventRequest_t *__pyx_v_request) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemRegisterEvents(__pyx_v_request);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 871, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemRegisterEvents", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":874
 * 
 * 
 * cdef nvmlReturn_t nvmlSystemEventSetWait(nvmlSystemEventSetWaitRequest_t* request) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSystemEventSetWait(request)
 * 
*/

/* nogil bridge for nvmlSystemEventSetWait: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetWait(nvmlSystemEventSetWaitRequest_t *__pyx_v_request) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetWait(__pyx_v_request);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 875, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSystemEventSetWait", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":878
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceModifyDrainState(nvmlPciInfo_t* pciInfo, nvmlEnableState_t newState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceModifyDrainState(pciInfo, newState)
 * 
*/

/* nogil bridge for nvmlDeviceModifyDrainState: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceModifyDrainState(nvmlPciInfo_t *__pyx_v_pciInfo, nvmlEnableState_t __pyx_v_newState) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceModifyDrainState(__pyx_v_pciInfo, __pyx_v_newState);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 879, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceModifyDrainState", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":882
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceQueryDrainState(nvmlPciInfo_t* pciInfo, nvmlEnableState_t* currentState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceQueryDrainState(pciInfo, currentState)
 * 
*/

/* nogil bridge for nvmlDeviceQueryDrainState: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceQueryDrainState(nvmlPciInfo_t *__pyx_v_pciInfo, nvmlEnableState_t *__pyx_v_currentState) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceQueryDrainState(__pyx_v_pciInfo, __pyx_v_currentState);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 883, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceQueryDrainState", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":886
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceRemoveGpu_v2(nvmlPciInfo_t* pciInfo, nvmlDetachGpuState_t gpuState, nvmlPcieLinkState_t linkState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceRemoveGpu_v2(pciInfo, gpuState, linkState)
 * 
*/

/* nogil bridge for nvmlDeviceRemoveGpu_v2: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRemoveGpu_v2(nvmlPciInfo_t *__pyx_v_pciInfo, nvmlDetachGpuState_t __pyx_v_gpuState, nvmlPcieLinkState_t __pyx_v_linkState) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceRemoveGpu_v2(__pyx_v_pciInfo, __pyx_v_gpuState, __pyx_v_linkState);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 887, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceRemoveGpu_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":890
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceDiscoverGpus(nvmlPciInfo_t* pciInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceDiscoverGpus(pciInfo)
 * 
*/

/* nogil bridge for nvmlDeviceDiscoverGpus: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceDiscoverGpus(nvmlPciInfo_t *__pyx_v_pciInfo) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceDiscoverGpus(__pyx_v_pciInfo);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 891, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceDiscoverGpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":894
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t* values) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetFieldValues(device, valuesCount, values)
 * 
*/

/* nogil bridge for nvmlDeviceGetFieldValues: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFieldValues(nvmlDevice_t __pyx_v_device, int __pyx_v_valuesCount, nvmlFieldValue_t *__pyx_v_values) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFieldValues(__pyx_v_device, __pyx_v_valuesCount, __pyx_v_values);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 895, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetFieldValues", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":898
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceClearFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t* values) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceClearFieldValues(device, valuesCount, values)
 * 
*/

/* nogil bridge for nvmlDeviceClearFieldValues: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearFieldValues(nvmlDevice_t __pyx_v_device, int __pyx_v_valuesCount, nvmlFieldValue_t *__pyx_v_values) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearFieldValues(__pyx_v_device, __pyx_v_valuesCount, __pyx_v_values);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 899, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceClearFieldValues", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":902
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t* pVirtualMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVirtualizationMode(device, pVirtualMode)
 * 
*/

/* nogil bridge for nvmlDeviceGetVirtualizationMode: forwards to the internal
 * _nvml binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVirtualizationMode(nvmlDevice_t __pyx_v_device, nvmlGpuVirtualizationMode_t *__pyx_v_pVirtualMode) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVirtualizationMode(__pyx_v_device, __pyx_v_pVirtualMode);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 903, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVirtualizationMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":906
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHostVgpuMode(nvmlDevice_t device, nvmlHostVgpuMode_t* pHostVgpuMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHostVgpuMode(device, pHostVgpuMode)
 * 
*/

/* nogil bridge for nvmlDeviceGetHostVgpuMode: forwards to the internal _nvml
 * binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostVgpuMode(nvmlDevice_t __pyx_v_device, nvmlHostVgpuMode_t *__pyx_v_pHostVgpuMode) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHostVgpuMode(__pyx_v_device, __pyx_v_pHostVgpuMode);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 907, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHostVgpuMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":910
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVirtualizationMode(device, virtualMode)
 * 
*/

/* nogil bridge for nvmlDeviceSetVirtualizationMode: forwards to the internal
 * _nvml binding; on the sentinel error return with a pending Python exception,
 * records a traceback entry and propagates the sentinel. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVirtualizationMode(nvmlDevice_t __pyx_v_device, nvmlGpuVirtualizationMode_t __pyx_v_virtualMode) {
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* Delegate to the internal implementation resolved by the _nvml module. */
  nvmlReturn_t __pyx_status = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVirtualizationMode(__pyx_v_device, __pyx_v_virtualMode);
  /* Sentinel alone is ambiguous; a pending Python exception confirms failure. */
  if (unlikely(__pyx_status == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 911, __pyx_L1_error)
  return __pyx_status;

  __pyx_L1_error:;
  {
    /* Runs without the GIL; acquire it to record the traceback. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetVirtualizationMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":914
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice_t device, nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuHeterogeneousMode(device, pHeterogeneousMode)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetVgpuHeterogeneousMode` (cy_nvml.pyx:914).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice_t __pyx_v_device, nvmlVgpuHeterogeneousMode_t *__pyx_v_pHeterogeneousMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":915
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice_t device, nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuHeterogeneousMode(device, pHeterogeneousMode)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuHeterogeneousMode(__pyx_v_device, __pyx_v_pHeterogeneousMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 915, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":914
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuHeterogeneousMode(nvmlDevice_t device, nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuHeterogeneousMode(device, pHeterogeneousMode)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuHeterogeneousMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":918
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice_t device, const nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVgpuHeterogeneousMode(device, pHeterogeneousMode)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceSetVgpuHeterogeneousMode` (cy_nvml.pyx:918).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice_t __pyx_v_device, nvmlVgpuHeterogeneousMode_t const *__pyx_v_pHeterogeneousMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":919
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice_t device, const nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetVgpuHeterogeneousMode(device, pHeterogeneousMode)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuHeterogeneousMode(__pyx_v_device, __pyx_v_pHeterogeneousMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 919, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":918
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuHeterogeneousMode(nvmlDevice_t device, const nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVgpuHeterogeneousMode(device, pHeterogeneousMode)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetVgpuHeterogeneousMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":922
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuPlacementId_t* pPlacement) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetPlacementId(vgpuInstance, pPlacement)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlVgpuInstanceGetPlacementId` (cy_nvml.pyx:922).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlVgpuPlacementId_t *__pyx_v_pPlacement) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":923
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuPlacementId_t* pPlacement) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetPlacementId(vgpuInstance, pPlacement)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetPlacementId(__pyx_v_vgpuInstance, __pyx_v_pPlacement); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 923, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":922
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetPlacementId(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuPlacementId_t* pPlacement) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetPlacementId(vgpuInstance, pPlacement)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetPlacementId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":926
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t* pPlacementList) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuTypeSupportedPlacements(device, vgpuTypeId, pPlacementList)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetVgpuTypeSupportedPlacements` (cy_nvml.pyx:926).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice_t __pyx_v_device, nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, nvmlVgpuPlacementList_t *__pyx_v_pPlacementList) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":927
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t* pPlacementList) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuTypeSupportedPlacements(device, vgpuTypeId, pPlacementList)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuTypeSupportedPlacements(__pyx_v_device, __pyx_v_vgpuTypeId, __pyx_v_pPlacementList); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 927, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":926
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuTypeSupportedPlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t* pPlacementList) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuTypeSupportedPlacements(device, vgpuTypeId, pPlacementList)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuTypeSupportedPlacements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":930
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t* pPlacementList) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpuTypeId, pPlacementList)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetVgpuTypeCreatablePlacements` (cy_nvml.pyx:930).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice_t __pyx_v_device, nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, nvmlVgpuPlacementList_t *__pyx_v_pPlacementList) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":931
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t* pPlacementList) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpuTypeId, pPlacementList)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuTypeCreatablePlacements(__pyx_v_device, __pyx_v_vgpuTypeId, __pyx_v_pPlacementList); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 931, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":930
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuTypeCreatablePlacements(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuPlacementList_t* pPlacementList) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuTypeCreatablePlacements(device, vgpuTypeId, pPlacementList)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuTypeCreatablePlacements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":934
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* gspHeapSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetGspHeapSize(vgpuTypeId, gspHeapSize)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlVgpuTypeGetGspHeapSize` (cy_nvml.pyx:934).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned PY_LONG_LONG *__pyx_v_gspHeapSize) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":935
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* gspHeapSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetGspHeapSize(vgpuTypeId, gspHeapSize)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetGspHeapSize(__pyx_v_vgpuTypeId, __pyx_v_gspHeapSize); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 935, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":934
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetGspHeapSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* gspHeapSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetGspHeapSize(vgpuTypeId, gspHeapSize)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetGspHeapSize", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":938
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* fbReservation) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetFbReservation(vgpuTypeId, fbReservation)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlVgpuTypeGetFbReservation` (cy_nvml.pyx:938).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned PY_LONG_LONG *__pyx_v_fbReservation) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":939
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* fbReservation) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetFbReservation(vgpuTypeId, fbReservation)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFbReservation(__pyx_v_vgpuTypeId, __pyx_v_fbReservation); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 939, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":938
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFbReservation(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* fbReservation) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetFbReservation(vgpuTypeId, fbReservation)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetFbReservation", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":942
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetRuntimeStateSize(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuRuntimeState_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetRuntimeStateSize(vgpuInstance, pState)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlVgpuInstanceGetRuntimeStateSize` (cy_nvml.pyx:942).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetRuntimeStateSize(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlVgpuRuntimeState_t *__pyx_v_pState) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":943
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetRuntimeStateSize(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuRuntimeState_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetRuntimeStateSize(vgpuInstance, pState)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetRuntimeStateSize(__pyx_v_vgpuInstance, __pyx_v_pState); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 943, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":942
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetRuntimeStateSize(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuRuntimeState_t* pState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetRuntimeStateSize(vgpuInstance, pState)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetRuntimeStateSize", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":946
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, nvmlEnableState_t state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVgpuCapabilities(device, capability, state)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceSetVgpuCapabilities` (cy_nvml.pyx:946).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuCapabilities(nvmlDevice_t __pyx_v_device, nvmlDeviceVgpuCapability_t __pyx_v_capability, nvmlEnableState_t __pyx_v_state) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":947
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, nvmlEnableState_t state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetVgpuCapabilities(device, capability, state)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuCapabilities(__pyx_v_device, __pyx_v_capability, __pyx_v_state); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 947, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":946
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, nvmlEnableState_t state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVgpuCapabilities(device, capability, state)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetVgpuCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":950
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t* pGridLicensableFeatures) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGridLicensableFeatures_v4(device, pGridLicensableFeatures)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetGridLicensableFeatures_v4` (cy_nvml.pyx:950).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t __pyx_v_device, nvmlGridLicensableFeatures_t *__pyx_v_pGridLicensableFeatures) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":951
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t* pGridLicensableFeatures) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGridLicensableFeatures_v4(device, pGridLicensableFeatures)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGridLicensableFeatures_v4(__pyx_v_device, __pyx_v_pGridLicensableFeatures); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 951, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":950
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t* pGridLicensableFeatures) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGridLicensableFeatures_v4(device, pGridLicensableFeatures)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGridLicensableFeatures_v4", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":954
 * 
 * 
 * cdef nvmlReturn_t nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetVgpuDriverCapabilities(capability, capResult)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlGetVgpuDriverCapabilities` (cy_nvml.pyx:954).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t __pyx_v_capability, unsigned int *__pyx_v_capResult) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":955
 * 
 * cdef nvmlReturn_t nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGetVgpuDriverCapabilities(capability, capResult)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuDriverCapabilities(__pyx_v_capability, __pyx_v_capResult); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 955, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":954
 * 
 * 
 * cdef nvmlReturn_t nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetVgpuDriverCapabilities(capability, capResult)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGetVgpuDriverCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":958
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuCapabilities(device, capability, capResult)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetVgpuCapabilities` (cy_nvml.pyx:958).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuCapabilities(nvmlDevice_t __pyx_v_device, nvmlDeviceVgpuCapability_t __pyx_v_capability, unsigned int *__pyx_v_capResult) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":959
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuCapabilities(device, capability, capResult)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuCapabilities(__pyx_v_device, __pyx_v_capability, __pyx_v_capResult); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 959, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":958
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuCapabilities(device, capability, capResult)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":962
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuTypeId_t* vgpuTypeIds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetSupportedVgpus` (cy_nvml.pyx:962).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedVgpus(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_vgpuCount, nvmlVgpuTypeId_t *__pyx_v_vgpuTypeIds) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":963
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuTypeId_t* vgpuTypeIds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedVgpus(__pyx_v_device, __pyx_v_vgpuCount, __pyx_v_vgpuTypeIds); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 963, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":962
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuTypeId_t* vgpuTypeIds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSupportedVgpus(device, vgpuCount, vgpuTypeIds)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSupportedVgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":966
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuTypeId_t* vgpuTypeIds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCreatableVgpus(device, vgpuCount, vgpuTypeIds)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlDeviceGetCreatableVgpus` (cy_nvml.pyx:966).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCreatableVgpus(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_vgpuCount, nvmlVgpuTypeId_t *__pyx_v_vgpuTypeIds) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":967
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuTypeId_t* vgpuTypeIds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCreatableVgpus(device, vgpuCount, vgpuTypeIds)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCreatableVgpus(__pyx_v_device, __pyx_v_vgpuCount, __pyx_v_vgpuTypeIds); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 967, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":966
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuTypeId_t* vgpuTypeIds) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCreatableVgpus(device, vgpuCount, vgpuTypeIds)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCreatableVgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":970
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeClass, unsigned int* size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetClass(vgpuTypeId, vgpuTypeClass, size)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlVgpuTypeGetClass` (cy_nvml.pyx:970).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, char *__pyx_v_vgpuTypeClass, unsigned int *__pyx_v_size) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":971
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeClass, unsigned int* size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetClass(vgpuTypeId, vgpuTypeClass, size)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetClass(__pyx_v_vgpuTypeId, __pyx_v_vgpuTypeClass, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 971, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":970
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeClass, unsigned int* size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetClass(vgpuTypeId, vgpuTypeClass, size)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetClass", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":974
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeName, unsigned int* size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetName(vgpuTypeId, vgpuTypeName, size)
 * 
*/

/* Cython-generated wrapper for `cy_nvml.nvmlVgpuTypeGetName` (cy_nvml.pyx:974).
 * Forwards to the dynamically resolved shim in cuda.bindings._internal._nvml and
 * returns its nvmlReturn_t unchanged.  Declared
 * `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`, so the sentinel return value
 * signals an error only when a Python exception is also set. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetName(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, char *__pyx_v_vgpuTypeName, unsigned int *__pyx_v_size) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots: written by __PYX_ERR, read by __Pyx_AddTraceback. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":975
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeName, unsigned int* size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetName(vgpuTypeId, vgpuTypeName, size)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Sentinel is ambiguous; only an error if a Python exception is set. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetName(__pyx_v_vgpuTypeId, __pyx_v_vgpuTypeName, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 975, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":974
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeName, unsigned int* size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetName(vgpuTypeId, vgpuTypeName, size)
 * 
*/

  /* function exit code */
  /* Error path: re-acquire the GIL (function runs nogil) just to record the
   * traceback, then return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetName", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":978
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* gpuInstanceProfileId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetGpuInstanceProfileId(vgpuTypeId, gpuInstanceProfileId)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetGpuInstanceProfileId and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned int *__pyx_v_gpuInstanceProfileId) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":979
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* gpuInstanceProfileId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetGpuInstanceProfileId(vgpuTypeId, gpuInstanceProfileId)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetGpuInstanceProfileId(__pyx_v_vgpuTypeId, __pyx_v_gpuInstanceProfileId); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 979, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":978
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* gpuInstanceProfileId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetGpuInstanceProfileId(vgpuTypeId, gpuInstanceProfileId)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetGpuInstanceProfileId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":982
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* deviceID, unsigned long long* subsystemID) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetDeviceID(vgpuTypeId, deviceID, subsystemID)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetDeviceID and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned PY_LONG_LONG *__pyx_v_deviceID, unsigned PY_LONG_LONG *__pyx_v_subsystemID) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":983
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* deviceID, unsigned long long* subsystemID) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetDeviceID(vgpuTypeId, deviceID, subsystemID)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetDeviceID(__pyx_v_vgpuTypeId, __pyx_v_deviceID, __pyx_v_subsystemID); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 983, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":982
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* deviceID, unsigned long long* subsystemID) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetDeviceID(vgpuTypeId, deviceID, subsystemID)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetDeviceID", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":986
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* fbSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetFramebufferSize(vgpuTypeId, fbSize)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetFramebufferSize and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned PY_LONG_LONG *__pyx_v_fbSize) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":987
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* fbSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetFramebufferSize(vgpuTypeId, fbSize)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFramebufferSize(__pyx_v_vgpuTypeId, __pyx_v_fbSize); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 987, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":986
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long* fbSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetFramebufferSize(vgpuTypeId, fbSize)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetFramebufferSize", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":990
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* numDisplayHeads) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetNumDisplayHeads(vgpuTypeId, numDisplayHeads)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetNumDisplayHeads and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned int *__pyx_v_numDisplayHeads) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":991
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* numDisplayHeads) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetNumDisplayHeads(vgpuTypeId, numDisplayHeads)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetNumDisplayHeads(__pyx_v_vgpuTypeId, __pyx_v_numDisplayHeads); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 991, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":990
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* numDisplayHeads) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetNumDisplayHeads(vgpuTypeId, numDisplayHeads)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetNumDisplayHeads", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":994
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int* xdim, unsigned int* ydim) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetResolution(vgpuTypeId, displayIndex, xdim, ydim)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetResolution and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned int __pyx_v_displayIndex, unsigned int *__pyx_v_xdim, unsigned int *__pyx_v_ydim) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":995
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int* xdim, unsigned int* ydim) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetResolution(vgpuTypeId, displayIndex, xdim, ydim)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetResolution(__pyx_v_vgpuTypeId, __pyx_v_displayIndex, __pyx_v_xdim, __pyx_v_ydim); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 995, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":994
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int* xdim, unsigned int* ydim) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetResolution(vgpuTypeId, displayIndex, xdim, ydim)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetResolution", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":998
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeLicenseString, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetLicense(vgpuTypeId, vgpuTypeLicenseString, size)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetLicense and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, char *__pyx_v_vgpuTypeLicenseString, unsigned int __pyx_v_size) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":999
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeLicenseString, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetLicense(vgpuTypeId, vgpuTypeLicenseString, size)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetLicense(__pyx_v_vgpuTypeId, __pyx_v_vgpuTypeLicenseString, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 999, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":998
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char* vgpuTypeLicenseString, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetLicense(vgpuTypeId, vgpuTypeLicenseString, size)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetLicense", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1002
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* frameRateLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetFrameRateLimit(vgpuTypeId, frameRateLimit)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetFrameRateLimit and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned int *__pyx_v_frameRateLimit) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1003
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* frameRateLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetFrameRateLimit(vgpuTypeId, frameRateLimit)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFrameRateLimit(__pyx_v_vgpuTypeId, __pyx_v_frameRateLimit); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1003, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1002
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* frameRateLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetFrameRateLimit(vgpuTypeId, frameRateLimit)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetFrameRateLimit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1006
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int* vgpuInstanceCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetMaxInstances(device, vgpuTypeId, vgpuInstanceCount)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetMaxInstances and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstances(nvmlDevice_t __pyx_v_device, nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned int *__pyx_v_vgpuInstanceCount) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1007
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int* vgpuInstanceCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetMaxInstances(device, vgpuTypeId, vgpuInstanceCount)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstances(__pyx_v_device, __pyx_v_vgpuTypeId, __pyx_v_vgpuInstanceCount); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1007, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1006
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int* vgpuInstanceCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetMaxInstances(device, vgpuTypeId, vgpuInstanceCount)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetMaxInstances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1010
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* vgpuInstanceCountPerVm) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId, vgpuInstanceCountPerVm)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetMaxInstancesPerVm and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, unsigned int *__pyx_v_vgpuInstanceCountPerVm) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1011
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* vgpuInstanceCountPerVm) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId, vgpuInstanceCountPerVm)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstancesPerVm(__pyx_v_vgpuTypeId, __pyx_v_vgpuInstanceCountPerVm); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1011, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1010
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstancesPerVm(nvmlVgpuTypeId_t vgpuTypeId, unsigned int* vgpuInstanceCountPerVm) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetMaxInstancesPerVm(vgpuTypeId, vgpuInstanceCountPerVm)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetMaxInstancesPerVm", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1014
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetBAR1Info(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuTypeBar1Info_t* bar1Info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetBAR1Info(vgpuTypeId, bar1Info)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuTypeGetBAR1Info and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetBAR1Info(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, nvmlVgpuTypeBar1Info_t *__pyx_v_bar1Info) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1015
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetBAR1Info(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuTypeBar1Info_t* bar1Info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetBAR1Info(vgpuTypeId, bar1Info)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetBAR1Info(__pyx_v_vgpuTypeId, __pyx_v_bar1Info); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1015, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1014
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetBAR1Info(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuTypeBar1Info_t* bar1Info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetBAR1Info(vgpuTypeId, bar1Info)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetBAR1Info", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1018
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuInstance_t* vgpuInstances) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetActiveVgpus(device, vgpuCount, vgpuInstances)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlDeviceGetActiveVgpus and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetActiveVgpus(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_vgpuCount, nvmlVgpuInstance_t *__pyx_v_vgpuInstances) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1019
 * 
 * cdef nvmlReturn_t nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuInstance_t* vgpuInstances) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetActiveVgpus(device, vgpuCount, vgpuInstances)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetActiveVgpus(__pyx_v_device, __pyx_v_vgpuCount, __pyx_v_vgpuInstances); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1019, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1018
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int* vgpuCount, nvmlVgpuInstance_t* vgpuInstances) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetActiveVgpus(device, vgpuCount, vgpuInstances)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetActiveVgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1022
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char* vmId, unsigned int size, nvmlVgpuVmIdType_t* vmIdType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetVmID(vgpuInstance, vmId, size, vmIdType)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuInstanceGetVmID and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t __pyx_v_vgpuInstance, char *__pyx_v_vmId, unsigned int __pyx_v_size, nvmlVgpuVmIdType_t *__pyx_v_vmIdType) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1023
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char* vmId, unsigned int size, nvmlVgpuVmIdType_t* vmIdType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetVmID(vgpuInstance, vmId, size, vmIdType)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetVmID(__pyx_v_vgpuInstance, __pyx_v_vmId, __pyx_v_size, __pyx_v_vmIdType); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1023, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1022
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char* vmId, unsigned int size, nvmlVgpuVmIdType_t* vmIdType) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetVmID(vgpuInstance, vmId, size, vmIdType)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetVmID", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1026
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char* uuid, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetUUID(vgpuInstance, uuid, size)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuInstanceGetUUID and returns its nvmlReturn_t
 * unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t __pyx_v_vgpuInstance, char *__pyx_v_uuid, unsigned int __pyx_v_size) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1027
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char* uuid, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetUUID(vgpuInstance, uuid, size)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetUUID(__pyx_v_vgpuInstance, __pyx_v_uuid, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1027, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1026
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char* uuid, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetUUID(vgpuInstance, uuid, size)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetUUID", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1030
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetVmDriverVersion(vgpuInstance, version, length)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuInstanceGetVmDriverVersion and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t __pyx_v_vgpuInstance, char *__pyx_v_version, unsigned int __pyx_v_length) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1031
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetVmDriverVersion(vgpuInstance, version, length)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetVmDriverVersion(__pyx_v_vgpuInstance, __pyx_v_version, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1031, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1030
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetVmDriverVersion(vgpuInstance, version, length)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetVmDriverVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1034
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long* fbUsage) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFbUsage(vgpuInstance, fbUsage)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuInstanceGetFbUsage and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned PY_LONG_LONG *__pyx_v_fbUsage) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1035
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long* fbUsage) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetFbUsage(vgpuInstance, fbUsage)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFbUsage(__pyx_v_vgpuInstance, __pyx_v_fbUsage); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1035, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1034
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long* fbUsage) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFbUsage(vgpuInstance, fbUsage)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetFbUsage", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1038
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int* licensed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetLicenseStatus(vgpuInstance, licensed)
 * 
*/

/* Cython-generated shim: forwards to the cuda.bindings._internal._nvml
 * implementation of nvmlVgpuInstanceGetLicenseStatus and returns its
 * nvmlReturn_t unchanged.  The error path below runs only when the sentinel
 * _NVMLRETURN_T_INTERNAL_LOADING_ERROR is returned while a Python
 * exception is pending. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_licensed) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  /* Error-location slots consumed by __Pyx_AddTraceback below (expected to
     be filled in by the __PYX_ERR macro on the error path). */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1039
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int* licensed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetLicenseStatus(vgpuInstance, licensed)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetLicenseStatus(__pyx_v_vgpuInstance, __pyx_v_licensed); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1039, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1038
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int* licensed) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetLicenseStatus(vgpuInstance, licensed)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* The .pyx declares this function `nogil`, so the GIL may not be held:
     acquire it before recording the traceback, then return the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetLicenseStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1042
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t* vgpuTypeId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetType(vgpuInstance, vgpuTypeId)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetType: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetType(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlVgpuTypeId_t *__pyx_v_vgpuTypeId) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1043
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t* vgpuTypeId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetType(vgpuInstance, vgpuTypeId)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetType(__pyx_v_vgpuInstance, __pyx_v_vgpuTypeId); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1043, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1042
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t* vgpuTypeId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetType(vgpuInstance, vgpuTypeId)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetType", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1046
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int* frameRateLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFrameRateLimit(vgpuInstance, frameRateLimit)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetFrameRateLimit: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_frameRateLimit) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1047
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int* frameRateLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetFrameRateLimit(vgpuInstance, frameRateLimit)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFrameRateLimit(__pyx_v_vgpuInstance, __pyx_v_frameRateLimit); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1047, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1046
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int* frameRateLimit) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFrameRateLimit(vgpuInstance, frameRateLimit)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetFrameRateLimit", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1050
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t* eccMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEccMode(vgpuInstance, eccMode)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetEccMode: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlEnableState_t *__pyx_v_eccMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1051
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t* eccMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetEccMode(vgpuInstance, eccMode)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEccMode(__pyx_v_vgpuInstance, __pyx_v_eccMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1051, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1050
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEccMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t* eccMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEccMode(vgpuInstance, eccMode)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetEccMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1054
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int* encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, encoderCapacity)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetEncoderCapacity: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_encoderCapacity) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1055
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int* encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, encoderCapacity)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderCapacity(__pyx_v_vgpuInstance, __pyx_v_encoderCapacity); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1055, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1054
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int* encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, encoderCapacity)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetEncoderCapacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1058
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, encoderCapacity)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceSetEncoderCapacity: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR.
 * Note: encoderCapacity is passed by value here (a setter, not a getter). */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int __pyx_v_encoderCapacity) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1059
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, encoderCapacity)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceSetEncoderCapacity(__pyx_v_vgpuInstance, __pyx_v_encoderCapacity); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1059, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1058
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, encoderCapacity)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceSetEncoderCapacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1062
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, unsigned int* averageFps, unsigned int* averageLatency) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEncoderStats(vgpuInstance, sessionCount, averageFps, averageLatency)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetEncoderStats: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_sessionCount, unsigned int *__pyx_v_averageFps, unsigned int *__pyx_v_averageLatency) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1063
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, unsigned int* averageFps, unsigned int* averageLatency) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetEncoderStats(vgpuInstance, sessionCount, averageFps, averageLatency)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderStats(__pyx_v_vgpuInstance, __pyx_v_sessionCount, __pyx_v_averageFps, __pyx_v_averageLatency); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1063, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1062
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, unsigned int* averageFps, unsigned int* averageLatency) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEncoderStats(vgpuInstance, sessionCount, averageFps, averageLatency)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetEncoderStats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1066
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, nvmlEncoderSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEncoderSessions(vgpuInstance, sessionCount, sessionInfo)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetEncoderSessions: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_sessionCount, nvmlEncoderSessionInfo_t *__pyx_v_sessionInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1067
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, nvmlEncoderSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetEncoderSessions(vgpuInstance, sessionCount, sessionInfo)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderSessions(__pyx_v_vgpuInstance, __pyx_v_sessionCount, __pyx_v_sessionInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1067, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1066
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, nvmlEncoderSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetEncoderSessions(vgpuInstance, sessionCount, sessionInfo)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetEncoderSessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1070
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t* fbcStats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFBCStats(vgpuInstance, fbcStats)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetFBCStats: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlFBCStats_t *__pyx_v_fbcStats) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1071
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t* fbcStats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetFBCStats(vgpuInstance, fbcStats)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFBCStats(__pyx_v_vgpuInstance, __pyx_v_fbcStats); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1071, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1070
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t* fbcStats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFBCStats(vgpuInstance, fbcStats)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetFBCStats", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1074
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, nvmlFBCSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFBCSessions(vgpuInstance, sessionCount, sessionInfo)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetFBCSessions: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_sessionCount, nvmlFBCSessionInfo_t *__pyx_v_sessionInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1075
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, nvmlFBCSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetFBCSessions(vgpuInstance, sessionCount, sessionInfo)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFBCSessions(__pyx_v_vgpuInstance, __pyx_v_sessionCount, __pyx_v_sessionInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1075, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1074
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int* sessionCount, nvmlFBCSessionInfo_t* sessionInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetFBCSessions(vgpuInstance, sessionCount, sessionInfo)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetFBCSessions", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1078
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t vgpuInstance, unsigned int* gpuInstanceId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance, gpuInstanceId)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetGpuInstanceId: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_gpuInstanceId) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1079
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t vgpuInstance, unsigned int* gpuInstanceId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance, gpuInstanceId)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetGpuInstanceId(__pyx_v_vgpuInstance, __pyx_v_gpuInstanceId); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1079, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1078
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t vgpuInstance, unsigned int* gpuInstanceId) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetGpuInstanceId(vgpuInstance, gpuInstanceId)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetGpuInstanceId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1082
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance, char* vgpuPciId, unsigned int* length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetGpuPciId(vgpuInstance, vgpuPciId, length)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetGpuPciId: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t __pyx_v_vgpuInstance, char *__pyx_v_vgpuPciId, unsigned int *__pyx_v_length) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1083
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance, char* vgpuPciId, unsigned int* length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetGpuPciId(vgpuInstance, vgpuPciId, length)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetGpuPciId(__pyx_v_vgpuInstance, __pyx_v_vgpuPciId, __pyx_v_length); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1083, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1082
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance, char* vgpuPciId, unsigned int* length) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetGpuPciId(vgpuInstance, vgpuPciId, length)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetGpuPciId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1086
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetCapabilities(vgpuTypeId, capability, capResult)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuTypeGetCapabilities: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t __pyx_v_vgpuTypeId, nvmlVgpuCapability_t __pyx_v_capability, unsigned int *__pyx_v_capResult) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1087
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetCapabilities(vgpuTypeId, capability, capResult)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetCapabilities(__pyx_v_vgpuTypeId, __pyx_v_capability, __pyx_v_capResult); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1087, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1086
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuCapability_t capability, unsigned int* capResult) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetCapabilities(vgpuTypeId, capability, capResult)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1090
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char* mdevUuid, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetMdevUUID(vgpuInstance, mdevUuid, size)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuInstanceGetMdevUUID: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t __pyx_v_vgpuInstance, char *__pyx_v_mdevUuid, unsigned int __pyx_v_size) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1091
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char* mdevUuid, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetMdevUUID(vgpuInstance, mdevUuid, size)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetMdevUUID(__pyx_v_vgpuInstance, __pyx_v_mdevUuid, __pyx_v_size); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1091, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1090
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetMdevUUID(nvmlVgpuInstance_t vgpuInstance, char* mdevUuid, unsigned int size) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetMdevUUID(vgpuInstance, mdevUuid, size)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetMdevUUID", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1094
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetCreatableVgpus(nvmlGpuInstance_t gpuInstance, nvmlVgpuTypeIdInfo_t* pVgpus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetCreatableVgpus(gpuInstance, pVgpus)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlGpuInstanceGetCreatableVgpus: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetCreatableVgpus(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuTypeIdInfo_t *__pyx_v_pVgpus) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1095
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetCreatableVgpus(nvmlGpuInstance_t gpuInstance, nvmlVgpuTypeIdInfo_t* pVgpus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetCreatableVgpus(gpuInstance, pVgpus)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetCreatableVgpus(__pyx_v_gpuInstance, __pyx_v_pVgpus); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1095, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1094
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetCreatableVgpus(nvmlGpuInstance_t gpuInstance, nvmlVgpuTypeIdInfo_t* pVgpus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetCreatableVgpus(gpuInstance, pVgpus)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetCreatableVgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1098
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstancesPerGpuInstance(nvmlVgpuTypeMaxInstance_t* pMaxInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetMaxInstancesPerGpuInstance(pMaxInstance)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlVgpuTypeGetMaxInstancesPerGpuInstance: forwards to the lazily-bound
 * internal implementation in cuda.bindings._internal._nvml and reports a
 * pending Python exception via _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerGpuInstance(nvmlVgpuTypeMaxInstance_t *__pyx_v_pMaxInstance) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1099
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstancesPerGpuInstance(nvmlVgpuTypeMaxInstance_t* pMaxInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuTypeGetMaxInstancesPerGpuInstance(pMaxInstance)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstancesPerGpuInstance(__pyx_v_pMaxInstance); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1099, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1098
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuTypeGetMaxInstancesPerGpuInstance(nvmlVgpuTypeMaxInstance_t* pMaxInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuTypeGetMaxInstancesPerGpuInstance(pMaxInstance)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuTypeGetMaxInstancesPerGpuInstance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1102
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetActiveVgpus(nvmlGpuInstance_t gpuInstance, nvmlActiveVgpuInstanceInfo_t* pVgpuInstanceInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetActiveVgpus(gpuInstance, pVgpuInstanceInfo)
 * 
*/

/* Cython-generated C shim for the `cdef ... nogil` function
 * nvmlGpuInstanceGetActiveVgpus: forwards to the lazily-bound internal
 * implementation in cuda.bindings._internal._nvml and reports a pending
 * Python exception via the sentinel _NVMLRETURN_T_INTERNAL_LOADING_ERROR. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetActiveVgpus(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlActiveVgpuInstanceInfo_t *__pyx_v_pVgpuInstanceInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1103
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetActiveVgpus(nvmlGpuInstance_t gpuInstance, nvmlActiveVgpuInstanceInfo_t* pVgpuInstanceInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetActiveVgpus(gpuInstance, pVgpuInstanceInfo)             # <<<<<<<<<<<<<<
 * 
 * 
*/
  /* Forward the call. The sentinel value alone is ambiguous (it may be a
   * genuine NVML status), so it only signals an error when
   * __Pyx_ErrOccurredWithGIL() confirms a Python exception is pending. */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetActiveVgpus(__pyx_v_gpuInstance, __pyx_v_pVgpuInstanceInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1103, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1102
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetActiveVgpus(nvmlGpuInstance_t gpuInstance, nvmlActiveVgpuInstanceInfo_t* pVgpuInstanceInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetActiveVgpus(gpuInstance, pVgpuInstanceInfo)
 * 
*/

  /* function exit code */
  /* Error path: acquire the GIL before recording the traceback — the
   * function is declared `nogil` and may be entered without holding it. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetActiveVgpus", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1106
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceSetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerState_t* pScheduler) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceSetVgpuSchedulerState(gpuInstance, pScheduler)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGpuInstanceSetVgpuSchedulerState
 * to the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuSchedulerState(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuSchedulerState_t *__pyx_v_pScheduler) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1107
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceSetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerState_t* pScheduler) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceSetVgpuSchedulerState(gpuInstance, pScheduler)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceSetVgpuSchedulerState(__pyx_v_gpuInstance, __pyx_v_pScheduler); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1107, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1106
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceSetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerState_t* pScheduler) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceSetVgpuSchedulerState(gpuInstance, pScheduler)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceSetVgpuSchedulerState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1110
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerStateInfo_t* pSchedulerStateInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuSchedulerState(gpuInstance, pSchedulerStateInfo)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGpuInstanceGetVgpuSchedulerState
 * to the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerState(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuSchedulerStateInfo_t *__pyx_v_pSchedulerStateInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1111
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerStateInfo_t* pSchedulerStateInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetVgpuSchedulerState(gpuInstance, pSchedulerStateInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuSchedulerState(__pyx_v_gpuInstance, __pyx_v_pSchedulerStateInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1111, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1110
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuSchedulerState(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerStateInfo_t* pSchedulerStateInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuSchedulerState(gpuInstance, pSchedulerStateInfo)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetVgpuSchedulerState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1114
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuSchedulerLog(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerLogInfo_t* pSchedulerLogInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuSchedulerLog(gpuInstance, pSchedulerLogInfo)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGpuInstanceGetVgpuSchedulerLog
 * to the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerLog(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuSchedulerLogInfo_t *__pyx_v_pSchedulerLogInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1115
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuSchedulerLog(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerLogInfo_t* pSchedulerLogInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetVgpuSchedulerLog(gpuInstance, pSchedulerLogInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuSchedulerLog(__pyx_v_gpuInstance, __pyx_v_pSchedulerLogInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1115, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1114
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuSchedulerLog(nvmlGpuInstance_t gpuInstance, nvmlVgpuSchedulerLogInfo_t* pSchedulerLogInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuSchedulerLog(gpuInstance, pSchedulerLogInfo)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetVgpuSchedulerLog", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1118
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuTypeCreatablePlacements(nvmlGpuInstance_t gpuInstance, nvmlVgpuCreatablePlacementInfo_t* pCreatablePlacementInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpuInstance, pCreatablePlacementInfo)
 * 
*/

/* Generated Cython thin wrapper: forwards
 * nvmlGpuInstanceGetVgpuTypeCreatablePlacements to the lazily-bound internal
 * implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuTypeCreatablePlacements(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuCreatablePlacementInfo_t *__pyx_v_pCreatablePlacementInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1119
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuTypeCreatablePlacements(nvmlGpuInstance_t gpuInstance, nvmlVgpuCreatablePlacementInfo_t* pCreatablePlacementInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpuInstance, pCreatablePlacementInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuTypeCreatablePlacements(__pyx_v_gpuInstance, __pyx_v_pCreatablePlacementInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1119, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1118
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuTypeCreatablePlacements(nvmlGpuInstance_t gpuInstance, nvmlVgpuCreatablePlacementInfo_t* pCreatablePlacementInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuTypeCreatablePlacements(gpuInstance, pCreatablePlacementInfo)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetVgpuTypeCreatablePlacements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1122
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuHeterogeneousMode(gpuInstance, pHeterogeneousMode)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGpuInstanceGetVgpuHeterogeneousMode
 * to the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuHeterogeneousMode(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuHeterogeneousMode_t *__pyx_v_pHeterogeneousMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1123
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetVgpuHeterogeneousMode(gpuInstance, pHeterogeneousMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuHeterogeneousMode(__pyx_v_gpuInstance, __pyx_v_pHeterogeneousMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1123, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1122
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetVgpuHeterogeneousMode(gpuInstance, pHeterogeneousMode)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetVgpuHeterogeneousMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1126
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceSetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, const nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceSetVgpuHeterogeneousMode(gpuInstance, pHeterogeneousMode)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGpuInstanceSetVgpuHeterogeneousMode
 * (const input struct) to the lazily-bound internal implementation; `nogil`
 * in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuHeterogeneousMode(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlVgpuHeterogeneousMode_t const *__pyx_v_pHeterogeneousMode) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1127
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceSetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, const nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceSetVgpuHeterogeneousMode(gpuInstance, pHeterogeneousMode)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceSetVgpuHeterogeneousMode(__pyx_v_gpuInstance, __pyx_v_pHeterogeneousMode); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1127, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1126
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceSetVgpuHeterogeneousMode(nvmlGpuInstance_t gpuInstance, const nvmlVgpuHeterogeneousMode_t* pHeterogeneousMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceSetVgpuHeterogeneousMode(gpuInstance, pHeterogeneousMode)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceSetVgpuHeterogeneousMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1130
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t* vgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetMetadata(vgpuInstance, vgpuMetadata, bufferSize)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlVgpuInstanceGetMetadata
 * (metadata buffer + in/out size) to the lazily-bound internal
 * implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlVgpuMetadata_t *__pyx_v_vgpuMetadata, unsigned int *__pyx_v_bufferSize) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1131
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t* vgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlVgpuInstanceGetMetadata(vgpuInstance, vgpuMetadata, bufferSize)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetMetadata(__pyx_v_vgpuInstance, __pyx_v_vgpuMetadata, __pyx_v_bufferSize); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1131, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1130
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t* vgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetMetadata(vgpuInstance, vgpuMetadata, bufferSize)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetMetadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1134
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t* pgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuMetadata(device, pgpuMetadata, bufferSize)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlDeviceGetVgpuMetadata
 * (metadata buffer + in/out size) to the lazily-bound internal
 * implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuMetadata(nvmlDevice_t __pyx_v_device, nvmlVgpuPgpuMetadata_t *__pyx_v_pgpuMetadata, unsigned int *__pyx_v_bufferSize) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1135
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t* pgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuMetadata(device, pgpuMetadata, bufferSize)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuMetadata(__pyx_v_device, __pyx_v_pgpuMetadata, __pyx_v_bufferSize); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1135, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1134
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t* pgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuMetadata(device, pgpuMetadata, bufferSize)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuMetadata", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1138
 * 
 * 
 * cdef nvmlReturn_t nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t* vgpuMetadata, nvmlVgpuPgpuMetadata_t* pgpuMetadata, nvmlVgpuPgpuCompatibility_t* compatibilityInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetVgpuCompatibility(vgpuMetadata, pgpuMetadata, compatibilityInfo)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGetVgpuCompatibility to the
 * lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *__pyx_v_vgpuMetadata, nvmlVgpuPgpuMetadata_t *__pyx_v_pgpuMetadata, nvmlVgpuPgpuCompatibility_t *__pyx_v_compatibilityInfo) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1139
 * 
 * cdef nvmlReturn_t nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t* vgpuMetadata, nvmlVgpuPgpuMetadata_t* pgpuMetadata, nvmlVgpuPgpuCompatibility_t* compatibilityInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGetVgpuCompatibility(vgpuMetadata, pgpuMetadata, compatibilityInfo)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuCompatibility(__pyx_v_vgpuMetadata, __pyx_v_pgpuMetadata, __pyx_v_compatibilityInfo); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1139, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1138
 * 
 * 
 * cdef nvmlReturn_t nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t* vgpuMetadata, nvmlVgpuPgpuMetadata_t* pgpuMetadata, nvmlVgpuPgpuCompatibility_t* compatibilityInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetVgpuCompatibility(vgpuMetadata, pgpuMetadata, compatibilityInfo)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGetVgpuCompatibility", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1142
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char* pgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPgpuMetadataString(device, pgpuMetadata, bufferSize)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlDeviceGetPgpuMetadataString
 * (char buffer + in/out size) to the lazily-bound internal implementation;
 * `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPgpuMetadataString(nvmlDevice_t __pyx_v_device, char *__pyx_v_pgpuMetadata, unsigned int *__pyx_v_bufferSize) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1143
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char* pgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetPgpuMetadataString(device, pgpuMetadata, bufferSize)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPgpuMetadataString(__pyx_v_device, __pyx_v_pgpuMetadata, __pyx_v_bufferSize); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1143, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1142
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char* pgpuMetadata, unsigned int* bufferSize) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPgpuMetadataString(device, pgpuMetadata, bufferSize)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPgpuMetadataString", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1146
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpuSchedulerLog_t* pSchedulerLog) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuSchedulerLog(device, pSchedulerLog)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlDeviceGetVgpuSchedulerLog to the
 * lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t __pyx_v_device, nvmlVgpuSchedulerLog_t *__pyx_v_pSchedulerLog) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1147
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpuSchedulerLog_t* pSchedulerLog) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuSchedulerLog(device, pSchedulerLog)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerLog(__pyx_v_device, __pyx_v_pSchedulerLog); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1147, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1146
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpuSchedulerLog_t* pSchedulerLog) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuSchedulerLog(device, pSchedulerLog)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuSchedulerLog", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1150
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerGetState_t* pSchedulerState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuSchedulerState(device, pSchedulerState)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlDeviceGetVgpuSchedulerState to
 * the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t __pyx_v_device, nvmlVgpuSchedulerGetState_t *__pyx_v_pSchedulerState) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1151
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerGetState_t* pSchedulerState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuSchedulerState(device, pSchedulerState)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerState(__pyx_v_device, __pyx_v_pSchedulerState); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1151, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1150
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerGetState_t* pSchedulerState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuSchedulerState(device, pSchedulerState)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuSchedulerState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1154
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t device, nvmlVgpuSchedulerCapabilities_t* pCapabilities) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuSchedulerCapabilities(device, pCapabilities)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlDeviceGetVgpuSchedulerCapabilities
 * to the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t __pyx_v_device, nvmlVgpuSchedulerCapabilities_t *__pyx_v_pCapabilities) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1155
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t device, nvmlVgpuSchedulerCapabilities_t* pCapabilities) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetVgpuSchedulerCapabilities(device, pCapabilities)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerCapabilities(__pyx_v_device, __pyx_v_pCapabilities); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1155, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1154
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t device, nvmlVgpuSchedulerCapabilities_t* pCapabilities) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuSchedulerCapabilities(device, pCapabilities)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuSchedulerCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1158
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t* pSchedulerState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVgpuSchedulerState(device, pSchedulerState)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlDeviceSetVgpuSchedulerState to
 * the lazily-bound internal implementation; `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t __pyx_v_device, nvmlVgpuSchedulerSetState_t *__pyx_v_pSchedulerState) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1159
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t* pSchedulerState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceSetVgpuSchedulerState(device, pSchedulerState)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuSchedulerState(__pyx_v_device, __pyx_v_pSchedulerState); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1159, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1158
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t* pSchedulerState) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetVgpuSchedulerState(device, pSchedulerState)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetVgpuSchedulerState", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1162
 * 
 * 
 * cdef nvmlReturn_t nvmlGetVgpuVersion(nvmlVgpuVersion_t* supported, nvmlVgpuVersion_t* current) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetVgpuVersion(supported, current)
 * 
*/

/* Generated Cython thin wrapper: forwards nvmlGetVgpuVersion (supported and
 * current version out-params) to the lazily-bound internal implementation;
 * `nogil` in the .pyx source. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuVersion(nvmlVgpuVersion_t *__pyx_v_supported, nvmlVgpuVersion_t *__pyx_v_current) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;                  /* written by __PYX_ERR on error */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;  /* GIL state for error-path traceback */

  /* "cuda/bindings/cy_nvml.pyx":1163
 * 
 * cdef nvmlReturn_t nvmlGetVgpuVersion(nvmlVgpuVersion_t* supported, nvmlVgpuVersion_t* current) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGetVgpuVersion(supported, current)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  /* Error branch is taken only when the sentinel coincides with a pending
   * Python exception (checked with the GIL by __Pyx_ErrOccurredWithGIL). */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuVersion(__pyx_v_supported, __pyx_v_current); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1163, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1162
 * 
 * 
 * cdef nvmlReturn_t nvmlGetVgpuVersion(nvmlVgpuVersion_t* supported, nvmlVgpuVersion_t* current) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetVgpuVersion(supported, current)
 * 
 */

  /* function exit code */
  /* Error path: acquire the GIL (function runs nogil), record the traceback,
   * and return the loading-error sentinel. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGetVgpuVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1166
 * 
 * 
 * cdef nvmlReturn_t nvmlSetVgpuVersion(nvmlVgpuVersion_t* vgpuVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlSetVgpuVersion(vgpuVersion)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlSetVgpuVersion(vgpuVersion) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlSetVgpuVersion(nvmlVgpuVersion_t *__pyx_v_vgpuVersion) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSetVgpuVersion(__pyx_v_vgpuVersion);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1167, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlSetVgpuVersion", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1170
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, nvmlValueType_t* sampleValType, unsigned int* vgpuInstanceSamplesCount, nvmlVgpuInstanceUtilizationSample_t* utilizationSamples) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp, sampleValType, vgpuInstanceSamplesCount, utilizationSamples)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceGetVgpuUtilization(...) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuUtilization(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_lastSeenTimeStamp, nvmlValueType_t *__pyx_v_sampleValType, unsigned int *__pyx_v_vgpuInstanceSamplesCount, nvmlVgpuInstanceUtilizationSample_t *__pyx_v_utilizationSamples) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuUtilization(__pyx_v_device, __pyx_v_lastSeenTimeStamp, __pyx_v_sampleValType, __pyx_v_vgpuInstanceSamplesCount, __pyx_v_utilizationSamples);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1171, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1174
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuInstancesUtilizationInfo(nvmlDevice_t device, nvmlVgpuInstancesUtilizationInfo_t* vgpuUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuInstancesUtilizationInfo(device, vgpuUtilInfo)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceGetVgpuInstancesUtilizationInfo(device, vgpuUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuInstancesUtilizationInfo(nvmlDevice_t __pyx_v_device, nvmlVgpuInstancesUtilizationInfo_t *__pyx_v_vgpuUtilInfo) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuInstancesUtilizationInfo(__pyx_v_device, __pyx_v_vgpuUtilInfo);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1175, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuInstancesUtilizationInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1178
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, unsigned int* vgpuProcessSamplesCount, nvmlVgpuProcessUtilizationSample_t* utilizationSamples) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuProcessUtilization(device, lastSeenTimeStamp, vgpuProcessSamplesCount, utilizationSamples)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceGetVgpuProcessUtilization(...) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t __pyx_v_device, unsigned PY_LONG_LONG __pyx_v_lastSeenTimeStamp, unsigned int *__pyx_v_vgpuProcessSamplesCount, nvmlVgpuProcessUtilizationSample_t *__pyx_v_utilizationSamples) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuProcessUtilization(__pyx_v_device, __pyx_v_lastSeenTimeStamp, __pyx_v_vgpuProcessSamplesCount, __pyx_v_utilizationSamples);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1179, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuProcessUtilization", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1182
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetVgpuProcessesUtilizationInfo(nvmlDevice_t device, nvmlVgpuProcessesUtilizationInfo_t* vgpuProcUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetVgpuProcessesUtilizationInfo(device, vgpuProcUtilInfo)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceGetVgpuProcessesUtilizationInfo(device, vgpuProcUtilInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessesUtilizationInfo(nvmlDevice_t __pyx_v_device, nvmlVgpuProcessesUtilizationInfo_t *__pyx_v_vgpuProcUtilInfo) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuProcessesUtilizationInfo(__pyx_v_device, __pyx_v_vgpuProcUtilInfo);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1183, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetVgpuProcessesUtilizationInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1186
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetAccountingMode(vgpuInstance, mode)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlVgpuInstanceGetAccountingMode(vgpuInstance, mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlEnableState_t *__pyx_v_mode) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingMode(__pyx_v_vgpuInstance, __pyx_v_mode);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1187, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetAccountingMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1190
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetAccountingPids(nvmlVgpuInstance_t vgpuInstance, unsigned int* count, unsigned int* pids) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetAccountingPids(vgpuInstance, count, pids)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlVgpuInstanceGetAccountingPids(vgpuInstance, count, pids) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingPids(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int *__pyx_v_count, unsigned int *__pyx_v_pids) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingPids(__pyx_v_vgpuInstance, __pyx_v_count, __pyx_v_pids);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1191, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetAccountingPids", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1194
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance_t vgpuInstance, unsigned int pid, nvmlAccountingStats_t* stats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetAccountingStats(vgpuInstance, pid, stats)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlVgpuInstanceGetAccountingStats(vgpuInstance, pid, stats) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance_t __pyx_v_vgpuInstance, unsigned int __pyx_v_pid, nvmlAccountingStats_t *__pyx_v_stats) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingStats(__pyx_v_vgpuInstance, __pyx_v_pid, __pyx_v_stats);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1195, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetAccountingStats", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1198
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceClearAccountingPids(nvmlVgpuInstance_t vgpuInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceClearAccountingPids(vgpuInstance)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlVgpuInstanceClearAccountingPids(vgpuInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceClearAccountingPids(nvmlVgpuInstance_t __pyx_v_vgpuInstance) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceClearAccountingPids(__pyx_v_vgpuInstance);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1199, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceClearAccountingPids", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1202
 * 
 * 
 * cdef nvmlReturn_t nvmlVgpuInstanceGetLicenseInfo_v2(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t* licenseInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlVgpuInstanceGetLicenseInfo_v2(vgpuInstance, licenseInfo)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlVgpuInstanceGetLicenseInfo_v2(vgpuInstance, licenseInfo) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseInfo_v2(nvmlVgpuInstance_t __pyx_v_vgpuInstance, nvmlVgpuLicenseInfo_t *__pyx_v_licenseInfo) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetLicenseInfo_v2(__pyx_v_vgpuInstance, __pyx_v_licenseInfo);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1203, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlVgpuInstanceGetLicenseInfo_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1206
 * 
 * 
 * cdef nvmlReturn_t nvmlGetExcludedDeviceCount(unsigned int* deviceCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetExcludedDeviceCount(deviceCount)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlGetExcludedDeviceCount(deviceCount) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceCount(unsigned int *__pyx_v_deviceCount) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetExcludedDeviceCount(__pyx_v_deviceCount);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1207, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGetExcludedDeviceCount", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1210
 * 
 * 
 * cdef nvmlReturn_t nvmlGetExcludedDeviceInfoByIndex(unsigned int index, nvmlExcludedDeviceInfo_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGetExcludedDeviceInfoByIndex(index, info)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlGetExcludedDeviceInfoByIndex(index, info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceInfoByIndex(unsigned int __pyx_v_index, nvmlExcludedDeviceInfo_t *__pyx_v_info) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetExcludedDeviceInfoByIndex(__pyx_v_index, __pyx_v_info);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1211, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGetExcludedDeviceInfoByIndex", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1214
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetMigMode(nvmlDevice_t device, unsigned int mode, nvmlReturn_t* activationStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetMigMode(device, mode, activationStatus)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceSetMigMode(device, mode, activationStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMigMode(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_mode, nvmlReturn_t *__pyx_v_activationStatus) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetMigMode(__pyx_v_device, __pyx_v_mode, __pyx_v_activationStatus);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1215, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetMigMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1218
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int* currentMode, unsigned int* pendingMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMigMode(device, currentMode, pendingMode)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceGetMigMode(device, currentMode, pendingMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigMode(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_currentMode, unsigned int *__pyx_v_pendingMode) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMigMode(__pyx_v_device, __pyx_v_currentMode, __pyx_v_pendingMode);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1219, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMigMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1222
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t device, unsigned int profile, nvmlGpuInstanceProfileInfo_v2_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuInstanceProfileInfoV(device, profile, info)
 * 
*/

/* Generated-code trampoline (Cython): nogil wrapper for
 *   cdef nvmlReturn_t nvmlDeviceGetGpuInstanceProfileInfoV(device, profile, info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR
 * Forwards to the internal lazy-loaded binding; the sentinel return is treated
 * as an error only when a Python exception is pending.
 * NOTE(review): hand edits are lost on Cython regeneration. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profile, nvmlGpuInstanceProfileInfo_v2_t *__pyx_v_info) {
  /* Written by the __PYX_ERR() macro on the error path; names are fixed. */
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_ret = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceProfileInfoV(__pyx_v_device, __pyx_v_profile, __pyx_v_info);
  if (unlikely(__pyx_ret == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1223, __pyx_L1_error)
  return __pyx_ret;

  __pyx_L1_error:;
  {
    /* Traceback recording needs the GIL; this function runs without it. */
    PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstanceProfileInfoV", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gilstate_save);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1226 (cdef ... except?SENTINEL nogil):
 * forwards nvmlDeviceGetGpuInstancePossiblePlacements_v2 to the lazily-resolved
 * internal binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstancePossiblePlacements_v2(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profileId, nvmlGpuInstancePlacement_t *__pyx_v_placements, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1227). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstancePossiblePlacements_v2(__pyx_v_device, __pyx_v_profileId, __pyx_v_placements, __pyx_v_count);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1227, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstancePossiblePlacements_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1230 (cdef ... except?SENTINEL nogil):
 * forwards nvmlDeviceGetGpuInstanceRemainingCapacity to the lazily-resolved
 * internal binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceRemainingCapacity(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profileId, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1231). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceRemainingCapacity(__pyx_v_device, __pyx_v_profileId, __pyx_v_count);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1231, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstanceRemainingCapacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1234 (cdef ... except?SENTINEL nogil):
 * forwards nvmlDeviceCreateGpuInstance to the lazily-resolved internal binding
 * and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstance(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profileId, nvmlGpuInstance_t *__pyx_v_gpuInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1235). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceCreateGpuInstance(__pyx_v_device, __pyx_v_profileId, __pyx_v_gpuInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1235, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceCreateGpuInstance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1238 (cdef ... except?SENTINEL nogil):
 * forwards nvmlDeviceCreateGpuInstanceWithPlacement to the lazily-resolved
 * internal binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstanceWithPlacement(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profileId, nvmlGpuInstancePlacement_t const *__pyx_v_placement, nvmlGpuInstance_t *__pyx_v_gpuInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1239). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceCreateGpuInstanceWithPlacement(__pyx_v_device, __pyx_v_profileId, __pyx_v_placement, __pyx_v_gpuInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1239, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceCreateGpuInstanceWithPlacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1242 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceDestroy to the lazily-resolved internal binding and
 * records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceDestroy(nvmlGpuInstance_t __pyx_v_gpuInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1243). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceDestroy(__pyx_v_gpuInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1243, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1246 (cdef ... except?SENTINEL nogil):
 * forwards nvmlDeviceGetGpuInstances to the lazily-resolved internal binding
 * and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstances(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profileId, nvmlGpuInstance_t *__pyx_v_gpuInstances, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1247). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstances(__pyx_v_device, __pyx_v_profileId, __pyx_v_gpuInstances, __pyx_v_count);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1247, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1250 (cdef ... except?SENTINEL nogil):
 * forwards nvmlDeviceGetGpuInstanceById to the lazily-resolved internal
 * binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceById(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_id, nvmlGpuInstance_t *__pyx_v_gpuInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1251). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceById(__pyx_v_device, __pyx_v_id, __pyx_v_gpuInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1251, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstanceById", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1254 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceGetInfo to the lazily-resolved internal binding and
 * records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetInfo(nvmlGpuInstance_t __pyx_v_gpuInstance, nvmlGpuInstanceInfo_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1255). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetInfo(__pyx_v_gpuInstance, __pyx_v_info);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1255, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1258 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceGetComputeInstanceProfileInfoV to the lazily-resolved
 * internal binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceProfileInfoV(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_profile, unsigned int __pyx_v_engProfile, nvmlComputeInstanceProfileInfo_v2_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1259). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceProfileInfoV(__pyx_v_gpuInstance, __pyx_v_profile, __pyx_v_engProfile, __pyx_v_info);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1259, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetComputeInstanceProfileInfoV", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1262 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceGetComputeInstanceRemainingCapacity to the
 * lazily-resolved internal binding and records a Python traceback entry when
 * the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceRemainingCapacity(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_profileId, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1263). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceRemainingCapacity(__pyx_v_gpuInstance, __pyx_v_profileId, __pyx_v_count);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1263, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetComputeInstanceRemainingCapacity", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1266 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceGetComputeInstancePossiblePlacements to the
 * lazily-resolved internal binding and records a Python traceback entry when
 * the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstancePossiblePlacements(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_profileId, nvmlComputeInstancePlacement_t *__pyx_v_placements, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1267). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstancePossiblePlacements(__pyx_v_gpuInstance, __pyx_v_profileId, __pyx_v_placements, __pyx_v_count);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1267, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetComputeInstancePossiblePlacements", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1270 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceCreateComputeInstance to the lazily-resolved
 * internal binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstance(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_profileId, nvmlComputeInstance_t *__pyx_v_computeInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1271). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceCreateComputeInstance(__pyx_v_gpuInstance, __pyx_v_profileId, __pyx_v_computeInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1271, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceCreateComputeInstance", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1274 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceCreateComputeInstanceWithPlacement to the
 * lazily-resolved internal binding and records a Python traceback entry when
 * the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstanceWithPlacement(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_profileId, nvmlComputeInstancePlacement_t const *__pyx_v_placement, nvmlComputeInstance_t *__pyx_v_computeInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1275). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceCreateComputeInstanceWithPlacement(__pyx_v_gpuInstance, __pyx_v_profileId, __pyx_v_placement, __pyx_v_computeInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1275, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceCreateComputeInstanceWithPlacement", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1278 (cdef ... except?SENTINEL nogil):
 * forwards nvmlComputeInstanceDestroy to the lazily-resolved internal binding
 * and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceDestroy(nvmlComputeInstance_t __pyx_v_computeInstance) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1279). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlComputeInstanceDestroy(__pyx_v_computeInstance);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1279, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlComputeInstanceDestroy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* Wrapper for "cuda/bindings/cy_nvml.pyx":1282 (cdef ... except?SENTINEL nogil):
 * forwards nvmlGpuInstanceGetComputeInstances to the lazily-resolved internal
 * binding and records a Python traceback entry when the call raised. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstances(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_profileId, nvmlComputeInstance_t *__pyx_v_computeInstances, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  int __pyx_lineno = 0;              /* written by __PYX_ERR on the error path */
  const char *__pyx_filename = NULL; /* written by __PYX_ERR on the error path */
  int __pyx_clineno = 0;             /* written by __PYX_ERR on the error path */
  PyGILState_STATE __pyx_gilstate_save;

  /* Delegate to the internal loader shim ("cy_nvml.pyx":1283). */
  __pyx_r = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstances(__pyx_v_gpuInstance, __pyx_v_profileId, __pyx_v_computeInstances, __pyx_v_count);
  /* 'except?' semantics: the sentinel only signals an error when a Python
   * exception is actually pending. */
  if (unlikely(__pyx_r == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1283, __pyx_L1_error)
  goto __pyx_L0;

  /* Error epilogue: nogil function — take the GIL to append the traceback
   * frame, then report the sentinel to the caller. */
  __pyx_L1_error:;
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetComputeInstances", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1286
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpuInstance, unsigned int id, nvmlComputeInstance_t* computeInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetComputeInstanceById(gpuInstance, id, computeInstance)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t __pyx_v_gpuInstance, unsigned int __pyx_v_id, nvmlComputeInstance_t *__pyx_v_computeInstance) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1287
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpuInstance, unsigned int id, nvmlComputeInstance_t* computeInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpuInstanceGetComputeInstanceById(gpuInstance, id, computeInstance)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceById(__pyx_v_gpuInstance, __pyx_v_id, __pyx_v_computeInstance); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1287, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1286
 * 
 * 
 * cdef nvmlReturn_t nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpuInstance, unsigned int id, nvmlComputeInstance_t* computeInstance) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpuInstanceGetComputeInstanceById(gpuInstance, id, computeInstance)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpuInstanceGetComputeInstanceById", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1290
 * 
 * 
 * cdef nvmlReturn_t nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlComputeInstanceGetInfo_v2(computeInstance, info)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t __pyx_v_computeInstance, nvmlComputeInstanceInfo_t *__pyx_v_info) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1291
 * 
 * cdef nvmlReturn_t nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlComputeInstanceGetInfo_v2(computeInstance, info)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlComputeInstanceGetInfo_v2(__pyx_v_computeInstance, __pyx_v_info); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1291, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1290
 * 
 * 
 * cdef nvmlReturn_t nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlComputeInstanceGetInfo_v2(computeInstance, info)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlComputeInstanceGetInfo_v2", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1294
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned int* isMigDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceIsMigDeviceHandle(device, isMigDevice)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceIsMigDeviceHandle(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_isMigDevice) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1295
 * 
 * cdef nvmlReturn_t nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned int* isMigDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceIsMigDeviceHandle(device, isMigDevice)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceIsMigDeviceHandle(__pyx_v_device, __pyx_v_isMigDevice); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1295, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1294
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned int* isMigDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceIsMigDeviceHandle(device, isMigDevice)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceIsMigDeviceHandle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1298
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int* id) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuInstanceId(device, id)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceId(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_id) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1299
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int* id) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetGpuInstanceId(device, id)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceId(__pyx_v_device, __pyx_v_id); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1299, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1298
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned int* id) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuInstanceId(device, id)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstanceId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1302
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int* id) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetComputeInstanceId(device, id)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeInstanceId(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_id) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1303
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int* id) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetComputeInstanceId(device, id)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeInstanceId(__pyx_v_device, __pyx_v_id); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1303, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1302
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigned int* id) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetComputeInstanceId(device, id)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetComputeInstanceId", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1306
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxMigDeviceCount(device, count)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_count) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1307
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMaxMigDeviceCount(device, count)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxMigDeviceCount(__pyx_v_device, __pyx_v_count); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1307, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1306
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigned int* count) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMaxMigDeviceCount(device, count)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMaxMigDeviceCount", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1310
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, nvmlDevice_t* migDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMigDeviceHandleByIndex(device, index, migDevice)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_index, nvmlDevice_t *__pyx_v_migDevice) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1311
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, nvmlDevice_t* migDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetMigDeviceHandleByIndex(device, index, migDevice)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMigDeviceHandleByIndex(__pyx_v_device, __pyx_v_index, __pyx_v_migDevice); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1311, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1310
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, unsigned int index, nvmlDevice_t* migDevice) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetMigDeviceHandleByIndex(device, index, migDevice)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetMigDeviceHandleByIndex", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1314
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice, device)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t __pyx_v_migDevice, nvmlDevice_t *__pyx_v_device) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1315
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice, device)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDeviceHandleFromMigDeviceHandle(__pyx_v_migDevice, __pyx_v_device); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1315, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1314
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t* device) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetDeviceHandleFromMigDeviceHandle(migDevice, device)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetDeviceHandleFromMigDeviceHandle", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1318
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmSampleGet(device, gpmSample)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSampleGet(nvmlDevice_t __pyx_v_device, nvmlGpmSample_t __pyx_v_gpmSample) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1319
 * 
 * cdef nvmlReturn_t nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpmSampleGet(device, gpmSample)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmSampleGet(__pyx_v_device, __pyx_v_gpmSample); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1319, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1318
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmSampleGet(device, gpmSample)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpmSampleGet", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1322
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmMigSampleGet(device, gpuInstanceId, gpmSample)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmMigSampleGet(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_gpuInstanceId, nvmlGpmSample_t __pyx_v_gpmSample) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1323
 * 
 * cdef nvmlReturn_t nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpmMigSampleGet(device, gpuInstanceId, gpmSample)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmMigSampleGet(__pyx_v_device, __pyx_v_gpuInstanceId, __pyx_v_gpmSample); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1323, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1322
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmMigSampleGet(device, gpuInstanceId, gpmSample)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpmMigSampleGet", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1326
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmQueryDeviceSupport(device, gpmSupport)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryDeviceSupport(nvmlDevice_t __pyx_v_device, nvmlGpmSupport_t *__pyx_v_gpmSupport) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1327
 * 
 * cdef nvmlReturn_t nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpmQueryDeviceSupport(device, gpmSupport)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmQueryDeviceSupport(__pyx_v_device, __pyx_v_gpmSupport); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1327, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1326
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t* gpmSupport) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmQueryDeviceSupport(device, gpmSupport)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpmQueryDeviceSupport", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1330
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmQueryIfStreamingEnabled(device, state)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t __pyx_v_device, unsigned int *__pyx_v_state) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1331
 * 
 * cdef nvmlReturn_t nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpmQueryIfStreamingEnabled(device, state)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmQueryIfStreamingEnabled(__pyx_v_device, __pyx_v_state); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1331, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1330
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmQueryIfStreamingEnabled(nvmlDevice_t device, unsigned int* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmQueryIfStreamingEnabled(device, state)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpmQueryIfStreamingEnabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1334
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmSetStreamingEnabled(device, state)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSetStreamingEnabled(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_state) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1335
 * 
 * cdef nvmlReturn_t nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlGpmSetStreamingEnabled(device, state)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmSetStreamingEnabled(__pyx_v_device, __pyx_v_state); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1335, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1334
 * 
 * 
 * cdef nvmlReturn_t nvmlGpmSetStreamingEnabled(nvmlDevice_t device, unsigned int state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlGpmSetStreamingEnabled(device, state)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlGpmSetStreamingEnabled", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1338
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCapabilities(device, caps)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCapabilities(nvmlDevice_t __pyx_v_device, nvmlDeviceCapabilities_t *__pyx_v_caps) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1339
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceGetCapabilities(device, caps)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCapabilities(__pyx_v_device, __pyx_v_caps); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1339, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1338
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetCapabilities(nvmlDevice_t device, nvmlDeviceCapabilities_t* caps) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetCapabilities(device, caps)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetCapabilities", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1342
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t __pyx_v_device, nvmlWorkloadPowerProfileRequestedProfiles_t *__pyx_v_requestedProfiles) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1343
 * 
 * cdef nvmlReturn_t nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(__pyx_v_device, __pyx_v_requestedProfiles); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1343, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1342
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(nvmlDevice_t device, nvmlWorkloadPowerProfileRequestedProfiles_t* requestedProfiles) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceWorkloadPowerProfileClearRequestedProfiles(device, requestedProfiles)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceWorkloadPowerProfileClearRequestedProfiles", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1346
 * 
 * 
 * cdef nvmlReturn_t nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDevicePowerSmoothingActivatePresetProfile(device, profile)
 * 
*/

/* Cython-generated wrapper (regenerated from cy_nvml.pyx -- do not hand edit).
 * Forwards to the lazily loaded internal binding and returns its
 * nvmlReturn_t.  Per the `except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil`
 * declaration, a pending Python exception is checked (taking the GIL) only
 * when the callee returns that sentinel value. */
static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t __pyx_v_device, nvmlPowerSmoothingProfile_t *__pyx_v_profile) {
  nvmlReturn_t __pyx_r;
  nvmlReturn_t __pyx_t_1;
  int __pyx_lineno = 0;  /* error-site bookkeeping filled in by __PYX_ERR */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  PyGILState_STATE __pyx_gilstate_save;

  /* "cuda/bindings/cy_nvml.pyx":1347
 * 
 * cdef nvmlReturn_t nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:
 *     return _nvml._nvmlDevicePowerSmoothingActivatePresetProfile(device, profile)             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_1 = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingActivatePresetProfile(__pyx_v_device, __pyx_v_profile); if (unlikely(__pyx_t_1 == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1347, __pyx_L1_error)
  __pyx_r = __pyx_t_1;
  goto __pyx_L0;

  /* "cuda/bindings/cy_nvml.pyx":1346
 * 
 * 
 * cdef nvmlReturn_t nvmlDevicePowerSmoothingActivatePresetProfile(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDevicePowerSmoothingActivatePresetProfile(device, profile)
 * 
 */

  /* function exit code */
  __pyx_L1_error:;
  /* Error path: the sentinel came back with a live Python exception; take
     the GIL to add a traceback frame, then propagate the sentinel. */
  __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
  __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDevicePowerSmoothingActivatePresetProfile", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
  __Pyx_PyGILState_Release(__pyx_gilstate_save);
  __pyx_L0:;
  return __pyx_r;
}

/* "cuda/bindings/cy_nvml.pyx":1350
 * 
 * 
 * cdef nvmlReturn_t nvmlDevicePowerSmoothingUpdatePresetProfileParam(nvmlDevice_t device, nvmlPowerSmoothingProfile_t* profile) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDevicePowerSmoothingUpdatePresetProfileParam(device, profile)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingUpdatePresetProfileParam(nvmlDevice_t __pyx_v_device, nvmlPowerSmoothingProfile_t *__pyx_v_profile) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1350-1351.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingUpdatePresetProfileParam(__pyx_v_device, __pyx_v_profile);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1351, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDevicePowerSmoothingUpdatePresetProfileParam", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1354
 * 
 * 
 * cdef nvmlReturn_t nvmlDevicePowerSmoothingSetState(nvmlDevice_t device, nvmlPowerSmoothingState_t* state) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDevicePowerSmoothingSetState(device, state)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingSetState(nvmlDevice_t __pyx_v_device, nvmlPowerSmoothingState_t *__pyx_v_state) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1354-1355.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingSetState(__pyx_v_device, __pyx_v_state);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1355, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDevicePowerSmoothingSetState", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1358
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetAddressingMode(nvmlDevice_t device, nvmlDeviceAddressingMode_t* mode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetAddressingMode(device, mode)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAddressingMode(nvmlDevice_t __pyx_v_device, nvmlDeviceAddressingMode_t *__pyx_v_mode) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1358-1359.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAddressingMode(__pyx_v_device, __pyx_v_mode);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1359, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetAddressingMode", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1362
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetRepairStatus(nvmlDevice_t device, nvmlRepairStatus_t* repairStatus) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetRepairStatus(device, repairStatus)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRepairStatus(nvmlDevice_t __pyx_v_device, nvmlRepairStatus_t *__pyx_v_repairStatus) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1362-1363.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRepairStatus(__pyx_v_device, __pyx_v_repairStatus);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1363, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetRepairStatus", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1366
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPowerMizerMode_v1(nvmlDevice_t device, nvmlDevicePowerMizerModes_v1_t* powerMizerMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPowerMizerMode_v1(device, powerMizerMode)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerMizerMode_v1(nvmlDevice_t __pyx_v_device, nvmlDevicePowerMizerModes_v1_t *__pyx_v_powerMizerMode) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1366-1367.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerMizerMode_v1(__pyx_v_device, __pyx_v_powerMizerMode);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1367, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPowerMizerMode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1370
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetPowerMizerMode_v1(nvmlDevice_t device, nvmlDevicePowerMizerModes_v1_t* powerMizerMode) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetPowerMizerMode_v1(device, powerMizerMode)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerMizerMode_v1(nvmlDevice_t __pyx_v_device, nvmlDevicePowerMizerModes_v1_t *__pyx_v_powerMizerMode) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1370-1371.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerMizerMode_v1(__pyx_v_device, __pyx_v_powerMizerMode);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1371, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetPowerMizerMode_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1374
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetPdi(nvmlDevice_t device, nvmlPdi_t* pdi) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetPdi(device, pdi)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPdi(nvmlDevice_t __pyx_v_device, nvmlPdi_t *__pyx_v_pdi) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1374-1375.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPdi(__pyx_v_device, __pyx_v_pdi);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1375, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetPdi", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1378
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceSetHostname_v1(nvmlDevice_t device, nvmlHostname_v1_t* hostname) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceSetHostname_v1(device, hostname)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetHostname_v1(nvmlDevice_t __pyx_v_device, nvmlHostname_v1_t *__pyx_v_hostname) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1378-1379.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetHostname_v1(__pyx_v_device, __pyx_v_hostname);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1379, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceSetHostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1382
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetHostname_v1(nvmlDevice_t device, nvmlHostname_v1_t* hostname) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetHostname_v1(device, hostname)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostname_v1(nvmlDevice_t __pyx_v_device, nvmlHostname_v1_t *__pyx_v_hostname) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1382-1383.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHostname_v1(__pyx_v_device, __pyx_v_hostname);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1383, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetHostname_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1386
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetNvLinkInfo(nvmlDevice_t device, nvmlNvLinkInfo_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetNvLinkInfo(device, info)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkInfo(nvmlDevice_t __pyx_v_device, nvmlNvLinkInfo_t *__pyx_v_info) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1386-1387.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkInfo(__pyx_v_device, __pyx_v_info);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1387, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetNvLinkInfo", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1390
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceReadWritePRM_v1(nvmlDevice_t device, nvmlPRMTLV_v1_t* buffer) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceReadWritePRM_v1(device, buffer)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceReadWritePRM_v1(nvmlDevice_t __pyx_v_device, nvmlPRMTLV_v1_t *__pyx_v_buffer) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1390-1391.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceReadWritePRM_v1(__pyx_v_device, __pyx_v_buffer);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1391, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceReadWritePRM_v1", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1394
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetGpuInstanceProfileInfoByIdV(nvmlDevice_t device, unsigned int profileId, nvmlGpuInstanceProfileInfo_v2_t* info) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetGpuInstanceProfileInfoByIdV(device, profileId, info)
 * 
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoByIdV(nvmlDevice_t __pyx_v_device, unsigned int __pyx_v_profileId, nvmlGpuInstanceProfileInfo_v2_t *__pyx_v_info) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1394-1395.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceProfileInfoByIdV(__pyx_v_device, __pyx_v_profileId, __pyx_v_info);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1395, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetGpuInstanceProfileInfoByIdV", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}

/* "cuda/bindings/cy_nvml.pyx":1398
 * 
 * 
 * cdef nvmlReturn_t nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(nvmlDevice_t device, nvmlEccSramUniqueUncorrectedErrorCounts_t* errorCounts) except?_NVMLRETURN_T_INTERNAL_LOADING_ERROR nogil:             # <<<<<<<<<<<<<<
 *     return _nvml._nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(device, errorCounts)
*/

static nvmlReturn_t __pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(nvmlDevice_t __pyx_v_device, nvmlEccSramUniqueUncorrectedErrorCounts_t *__pyx_v_errorCounts) {
  /* nogil wrapper generated from "cuda/bindings/cy_nvml.pyx":1398-1399.
   * Forwards to the loader-backed implementation in cuda.bindings._internal._nvml. */
  int __pyx_lineno = 0;               /* filled in by __PYX_ERR before the goto */
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  nvmlReturn_t __pyx_result = __pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts(__pyx_v_device, __pyx_v_errorCounts);
  /* The sentinel only means "Python exception pending" when one actually is;
   * confirming that requires briefly acquiring the GIL. */
  if (unlikely(__pyx_result == ((nvmlReturn_t)_NVMLRETURN_T_INTERNAL_LOADING_ERROR) && __Pyx_ErrOccurredWithGIL())) __PYX_ERR(0, 1399, __pyx_L1_error)
  return __pyx_result;

  __pyx_L1_error:;
  {
    /* Python C-API calls below require the GIL. */
    PyGILState_STATE __pyx_gil = __Pyx_PyGILState_Ensure();
    __Pyx_AddTraceback("cuda.bindings.cy_nvml.nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts", __pyx_clineno, __pyx_lineno, __pyx_filename);
    __Pyx_PyGILState_Release(__pyx_gil);
  }
  return _NVMLRETURN_T_INTERNAL_LOADING_ERROR;
}
/* #### Code section: module_exttypes ### */

/* Module method table. This extension publishes no Python-level `def`
 * functions from C (bindings are exported via the capsule/API mechanism
 * instead), so the table contains only the required NULL terminator. */
static PyMethodDef __pyx_methods[] = {
  {0, 0, 0, 0}
};
/* #### Code section: initfunc_declarations ### */
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_InitConstants(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(__pyx_mstatetype *__pyx_mstate); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_CreateCodeObjects(__pyx_mstatetype *__pyx_mstate); /*proto*/
/* #### Code section: init_module ### */

/* Module-init step for global variable initialization. This module defines
 * no Cython-level globals needing init, so only the RefNanny bookkeeping
 * macros run (no-ops unless CYTHON_REFNANNY is enabled). Always returns 0. */
static int __Pyx_modinit_global_init_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
  /*--- Global init code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Module-init step for exporting C variables to other Cython modules.
 * This module exports no variables, so the body is empty apart from the
 * RefNanny context bookkeeping. Always returns 0. */
static int __Pyx_modinit_variable_export_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
  /*--- Variable export code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Export this module's C-level (cdef) NVML wrapper functions into the
   module's C-API dict so other Cython modules can bind to them at the C
   level (Cython's shared-function export mechanism).
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_modinit_function_export_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
  /*--- Function export code ---*/
  {
    /* Owned reference to the module's C-API export dict. */
    __pyx_t_1 = __Pyx_ApiExport_GetApiDict(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* A single packed bytes constant holds all NUL-separated C signature
       strings, followed by the NUL-separated exported function names.
       The loop below walks both regions in lockstep with the pointer
       array, one entry per exported function. */
    const char * __pyx_export_signature = __Pyx_PyBytes_AsString(__pyx_mstate_global->__pyx_kp_b_char_const_nvmlReturn_t_nvmlRetu);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (unlikely(!__pyx_export_signature)) __PYX_ERR(0, 1, __pyx_L1_error)
    #endif
    /* The name region starts at this generated byte offset within the
       same packed constant (i.e. just past the signature region). */
    const char * __pyx_export_name = __pyx_export_signature + 14397;
    /* NULL-terminated array of function pointers, parallel to the packed
       name/signature table above. */
    void (*const __pyx_export_pointers[])(void) = {(void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlErrorString, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNVMLVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByPciBusId_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleBySerial, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUID, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetCudaDriverVersion_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceDestroy, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlComputeInstanceGetInfo_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeKeyRotationThresholdInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeKeyRotationThresholdInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearAccountingPids, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearCpuAffinity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetGpuLockedClocks, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetMemoryLockedClocks, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetCpuAffinity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceValidateInforom, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPgpuMetadataString, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardPartNumber, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomImageVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetName, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSerial, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUUID, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVbiosVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkVfOffset, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkVfOffset, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCudaComputeCapability, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpcClkMinMaxVfOffset, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemClkMinMaxVfOffset, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearFieldValues, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFieldValues, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBAR1MemoryInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBrand, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBridgeChipInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBusType, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetC2cModeInfoV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClkMonStatus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockOffsets, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetClockOffsets, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClock, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxClockOfPState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetClockInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxClockInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxCustomerBoostClock, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetComputeMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuAttestationReport, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeGpuCertificate, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeMemSizeInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCoolerInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAddressingMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetArchitecture, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAttributes_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClockFreqs, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceModes, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerMizerMode_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerMizerMode_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDeviceHandleFromMigDeviceHandle, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceOnSameBoard, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetP2PStatus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyCommonAncestor, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDramEncryptionMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDramEncryptionMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDriverModel_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDriverModel, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceClearEccErrorCounts, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramEccErrorStatus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDefaultEccMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayActive, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDisplayMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPersistenceMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPagesPendingStatus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAutoBoostedClocksEnabled, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEccMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAccountingMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAutoBoostedClocksEnabled, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetEccMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPersistenceMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultAutoBoostedClocksEnabled, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderCapacity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCStats, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeedRPM, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSampleGet, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryDeviceSupport, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDynamicPstatesInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuFabricInfoV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuOperationMode, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuOperationMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTopologyNearestGpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVirtualizationMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVirtualizationMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGridLicensableFeatures_v4, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostVgpuMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHostname_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetHostname_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMarginTemperature, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryErrorCounter, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEccErrors, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetConfComputeProtectedMemoryUsage, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryInfo_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvLinkDeviceLowPowerThreshold, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkBwMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetNvlinkBwMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvlinkSupportedBwModes, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceReadWritePRM_v1, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRetiredPages_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfoExt, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPciInfo_v3, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieThroughput, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPdi, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPlatformInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingActivatePresetProfile, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingUpdatePresetProfileParam, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDevicePowerSmoothingSetState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerSource, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRunningProcessDetailList, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetProcessesUtilizationInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPerformanceState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedPerformanceStates, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRepairStatus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAPIRestriction, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetAPIRestriction, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRowRemapperHistogram, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSamples, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetTemperatureThreshold, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureThreshold, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTemperatureV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetUtilizationRates, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuHeterogeneousMode, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuHeterogeneousMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuInstancesUtilizationInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuMetadata, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessesUtilizationInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuSchedulerLog, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetVgpuSchedulerState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeCreatablePlacements, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuTypeSupportedPlacements, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstances, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceWorkloadPowerProfileClearRequestedProfiles, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrentClocksEventReasons, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedClocksEventReasons, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedEventTypes, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTotalEnergyConsumption, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetLastBBXFlushTime, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetConfComputeUnprotectedMemSize, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRegisterEvents, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetVgpuProcessUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingBufferSize, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAdaptiveClockInfoStatus, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetBoardId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeInstanceId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkGeneration, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCurrPcieLinkWidth, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEnforcedPowerLimit, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuMaxPcieLinkGeneration, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIndex, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetInforomConfigurationChecksum, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetIrqNum, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxMigDeviceCount, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkGeneration, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMaxPcieLinkWidth, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryBusWidth, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinorNumber, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetModuleId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMultiGpuBoard, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumFans, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumGpuCores, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNumaNodeId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieLinkMaxSpeed, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieReplayCounter, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPcieSpeed, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementDefaultLimit, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimit, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerUsage, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceIsMigDeviceHandle, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmQueryIfStreamingEnabled, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderSessions, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFBCSessions, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetComputeRunningProcesses_v3, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMPSComputeRunningProcesses_v3, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetActiveVgpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCreatableVgpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedVgpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingPids, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetDecoderUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGspFirmwareMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetJpgUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMinMaxFanSpeed, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetOfaUtilization, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetPowerManagementLimitConstraints, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedMemoryClocks, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetEncoderStats, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetRemappedRows, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceResetNvLinkErrorCounters, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetDefaultFanSpeed_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetPowerManagementLimit, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmSetStreamingEnabled, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetAccountingStats, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMigDeviceHandleByIndex, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanControlPolicy_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanControlPolicy, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpmMigSampleGet, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstancePossiblePlacements_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstanceWithPlacement, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoByIdV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceProfileInfoV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceCreateGpuInstance, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceById, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstances, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetThermalSettings, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemoteDeviceType, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkCapability, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkErrorCounter, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkRemotePciInfo_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMigMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetFanSpeed_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetGpuInstanceRemainingCapacity, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetNvLinkVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetTargetFanSpeed, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetSupportedGraphicsClocks, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetFanSpeed_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetGpuLockedClocks, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceSetMemoryLockedClocks, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCpuAffinityWithinScope, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetMemoryAffinity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetCreate, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetFree, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlEventSetWait_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceDestroy, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetActiveVgpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuTypeCreatablePlacements, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuHeterogeneousMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuHeterogeneousMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerLog, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetVgpuSchedulerState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceSetVgpuSchedulerState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetCreatableVgpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstancePossiblePlacements, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstanceWithPlacement, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceCreateComputeInstance, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceById, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstances, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceRemainingCapacity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGpuInstanceGetComputeInstanceProfileInfoV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceDiscoverGpus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceRemoveGpu_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceQueryDrainState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceModifyDrainState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeSettings, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetDriverBranch, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetCreate, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetFree, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemEventSetWait, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemRegisterEvents, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByUUIDV, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitSetLedState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetLedState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetPsuInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetFanSpeedInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetUnitInfo, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetDevices, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetTemperature, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuDriverCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceClearAccountingPids, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuPciId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMdevUUID, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetUUID, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmDriverVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetVmID, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEccMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCStats, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseInfo_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetMetadata, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetPlacementId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetRuntimeStateSize, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetType, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFbUsage, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderCapacity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFrameRateLimit, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetGpuInstanceId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetLicenseStatus, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderSessions, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetFBCSessions, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingPids, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetEncoderStats, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceSetEncoderCapacity, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuInstanceGetAccountingStats, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuCompatibility, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetClass, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetName, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetLicense, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetCapabilities, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetBAR1Info, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFbReservation, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFramebufferSize, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGspHeapSize, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetDeviceID, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetFrameRateLimit, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetGpuInstanceProfileId, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerVm, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetNumDisplayHeads, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetResolution, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlVgpuTypeGetMaxInstancesPerGpuInstance, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSetVgpuVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetVgpuVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetCount_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceCount, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetConfComputeGpusReadyState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetNvlinkBwMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetCount, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetHicVersion, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlInitWithFlags, (void 
(*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetConfComputeGpusReadyState, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemSetNvlinkBwMode, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetProcessName, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlDeviceGetHandleByIndex_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlGetExcludedDeviceInfoByIndex, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlUnitGetHandleByIndex, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlSystemGetTopologyGpuSet, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlInit_v2, (void (*)(void))&__pyx_f_4cuda_8bindings_7cy_nvml_nvmlShutdown, (void (*)(void)) NULL};
    void (*const *__pyx_export_pointer)(void) = __pyx_export_pointers;
    const char *__pyx_export_current_signature = __pyx_export_signature;
    /* Register each pointer under its name, paired with its signature
       string; stop at the NULL sentinel at the end of the array. */
    while (*__pyx_export_pointer) {
      if (__Pyx_ExportFunction(__pyx_t_1, __pyx_export_name, *__pyx_export_pointer, __pyx_export_current_signature) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
      ++__pyx_export_pointer;
      /* Advance past the trailing NUL of the current name/signature. */
      __pyx_export_name = strchr(__pyx_export_name, '\0') + 1;
      __pyx_export_signature = strchr(__pyx_export_signature, '\0') + 1;
      /* An empty signature slot means "same signature as the previous
         entry" (signatures are deduplicated in the packed table). */
      if (*__pyx_export_signature != '\0') __pyx_export_current_signature = __pyx_export_signature;
    }
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_RefNannyFinishContext();
  return -1;
}

/* Module-init hook for creating this module's extension types. No
   cdef classes are defined in this module, so nothing is emitted and
   the hook simply succeeds. */
static int __Pyx_modinit_type_init_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
  /*--- Type init code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Module-init hook for importing extension types from other modules.
   This module imports none, so the hook is an empty success path. */
static int __Pyx_modinit_type_import_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
  /*--- Type import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Module-init hook for importing C-level variables from other modules.
   This module imports none, so the hook is an empty success path. */
static int __Pyx_modinit_variable_import_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
  /*--- Variable import code ---*/
  __Pyx_RefNannyFinishContext();
  return 0;
}

/* Generated init stage: import C-level functions from the sibling module
 * "cuda.bindings._internal._nvml" and bind every local NVML function
 * pointer (__pyx_f_4cuda_8bindings_9_internal_5_nvml__*) to the exported
 * implementation of the same name.  Returns 0 on success, -1 after setting
 * a Python exception on failure. */
static int __Pyx_modinit_function_import_code(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
  /*--- Function import code ---*/
  {
    __pyx_t_1 = PyImport_ImportModule("cuda.bindings._internal._nvml"); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
    __Pyx_GOTREF(__pyx_t_1);
    /* One interned bytes object packs every imported function's C signature,
     * NUL-separated; the function names follow the signatures in the same
     * blob.  The generated offset below locates the first name. */
    const char * __pyx_import_signature = __Pyx_PyBytes_AsString(__pyx_mstate_global->__pyx_kp_b_char_const_nvmlReturn_t_nvmlRetu_2);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (unlikely(!__pyx_import_signature)) __PYX_ERR(0, 1, __pyx_L1_error)
    #endif
    const char * __pyx_import_name = __pyx_import_signature + 14397;
    /* NULL-terminated table of addresses of the local function pointers to
     * fill in; its order matches the packed name/signature blob exactly. */
    void (**const __pyx_import_pointers[])(void) = {(void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlErrorString, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetDriverVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetNVMLVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByPciBusId_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleBySerial, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetCudaDriverVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetCudaDriverVersion_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlComputeInstanceDestroy, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlComputeInstanceGetInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeKeyRotationThresholdInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetConfComputeKeyRotationThresholdInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearCpuAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetGpuLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetMemoryLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetCpuAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceValidateInforom, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGspFirmwareVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPgpuMetadataString, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBoardPartNumber, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomImageVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetName, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSerial, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVbiosVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpcClkVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemClkVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCudaComputeCapability, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpcClkMinMaxVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemClkMinMaxVfOffset, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearFieldValues, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFieldValues, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBAR1MemoryInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBrand, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBridgeChipInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBusType, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetC2cModeInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClkMonStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClockOffsets, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetClockOffsets, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClock, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinMaxClockOfPState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetClockInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxClockInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxCustomerBoostClock, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetComputeMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeGpuAttestationReport, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeGpuCertificate, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeMemSizeInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCoolerInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAddressingMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetArchitecture, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAttributes_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrentClockFreqs, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPerformanceModes, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerMizerMode_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerMizerMode_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuCapabilities, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDeviceHandleFromMigDeviceHandle, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceOnSameBoard, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetP2PStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTopologyCommonAncestor, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDramEncryptionMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDramEncryptionMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDriverModel_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDriverModel, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceClearEccErrorCounts, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSramEccErrorStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDefaultEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDisplayActive, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDisplayMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPersistenceMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPagesPendingStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAutoBoostedClocksEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAccountingMode, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAutoBoostedClocksEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPersistenceMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDefaultAutoBoostedClocksEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFBCStats, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeedRPM, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmSampleGet, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmQueryDeviceSupport, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDynamicPstatesInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuFabricInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuOperationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetGpuOperationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTopologyNearestGpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVirtualizationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVirtualizationMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGridLicensableFeatures_v4, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHostVgpuMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHostname_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetHostname_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomVersion, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMarginTemperature, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryErrorCounter, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTotalEccErrors, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetConfComputeProtectedMemoryUsage, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetNvLinkDeviceLowPowerThreshold, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvlinkSupportedBwModes, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceReadWritePRM_v1, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPages, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRetiredPages_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPciInfoExt, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPciInfo_v3, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieThroughput, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPdi, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPlatformInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingActivatePresetProfile, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingUpdatePresetProfileParam, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDevicePowerSmoothingSetState, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerSource, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerManagementLimit_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRunningProcessDetailList, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetProcessUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetProcessesUtilizationInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPerformanceState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedPerformanceStates, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRepairStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAPIRestriction, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetAPIRestriction, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRowRemapperHistogram, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSamples, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetTemperatureThreshold, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTemperatureThreshold, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTemperatureV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetUtilizationRates, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuInstancesUtilizationInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuMetadata, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuProcessesUtilizationInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuSchedulerLog, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuTypeCreatablePlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuTypeSupportedPlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstances, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceWorkloadPowerProfileClearRequestedProfiles, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrentClocksEventReasons, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedClocksEventReasons, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedEventTypes, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTotalEnergyConsumption, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetLastBBXFlushTime, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetConfComputeUnprotectedMemSize, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceRegisterEvents, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetVgpuProcessUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingBufferSize, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAdaptiveClockInfoStatus, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetBoardId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeInstanceId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrPcieLinkGeneration, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCurrPcieLinkWidth, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEnforcedPowerLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuMaxPcieLinkGeneration, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetInforomConfigurationChecksum, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetIrqNum, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxMigDeviceCount, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxPcieLinkGeneration, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMaxPcieLinkWidth, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryBusWidth, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinorNumber, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetModuleId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMultiGpuBoard, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumFans, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumGpuCores, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNumaNodeId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieLinkMaxSpeed, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieReplayCounter, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPcieSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementDefaultLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerUsage, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceIsMigDeviceHandle, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmQueryIfStreamingEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFBCSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetComputeRunningProcesses_v3, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMPSComputeRunningProcesses_v3, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetActiveVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCreatableVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetDecoderUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGspFirmwareMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetJpgUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMigMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMinMaxFanSpeed, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetOfaUtilization, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetPowerManagementLimitConstraints, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedMemoryClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetEncoderStats, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetRemappedRows, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceResetNvLinkErrorCounters, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetDefaultFanSpeed_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetPowerManagementLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmSetStreamingEnabled, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetAccountingStats, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMigDeviceHandleByIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanControlPolicy_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetFanControlPolicy, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpmMigSampleGet, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstancePossiblePlacements_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceCreateGpuInstanceWithPlacement, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceProfileInfoByIdV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceProfileInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceCreateGpuInstance, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceById, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstances, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetThermalSettings, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkRemoteDeviceType, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkCapability, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkErrorCounter, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkRemotePciInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetMigMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetFanSpeed_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetGpuInstanceRemainingCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetNvLinkVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetTargetFanSpeed, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetSupportedGraphicsClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetFanSpeed_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetGpuLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceSetMemoryLockedClocks, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCpuAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCpuAffinityWithinScope, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetMemoryAffinity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetCreate, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetFree, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlEventSetWait_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceDestroy, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetActiveVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuTypeCreatablePlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceSetVgpuHeterogeneousMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuSchedulerLog, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceSetVgpuSchedulerState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetCreatableVgpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstancePossiblePlacements, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceCreateComputeInstanceWithPlacement, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceCreateComputeInstance, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceById, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstances, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceRemainingCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGpuInstanceGetComputeInstanceProfileInfoV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceDiscoverGpus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceRemoveGpu_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceQueryDrainState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceModifyDrainState, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeSettings, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetDriverBranch, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetCreate, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetFree, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemEventSetWait, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemRegisterEvents, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByUUIDV, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitSetLedState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetLedState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetPsuInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetFanSpeedInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetUnitInfo, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetDevices, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetTemperature, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuDriverCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceClearAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetGpuPciId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetMdevUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetUUID, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetVmDriverVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetVmID, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingMode, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEccMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFBCStats, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetLicenseInfo_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetMetadata, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetPlacementId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetRuntimeStateSize, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetType, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFbUsage, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFrameRateLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetGpuInstanceId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetLicenseStatus, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetFBCSessions, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingPids, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetEncoderStats, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceSetEncoderCapacity, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuInstanceGetAccountingStats, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuCompatibility, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetClass, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetName, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetLicense, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetCapabilities, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetBAR1Info, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFbReservation, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFramebufferSize, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetGspHeapSize, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetDeviceID, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetFrameRateLimit, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetGpuInstanceProfileId, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstancesPerVm, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetNumDisplayHeads, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetResolution, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlVgpuTypeGetMaxInstancesPerGpuInstance, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSetVgpuVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetVgpuVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetCount_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetExcludedDeviceCount, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetConfComputeGpusReadyState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetCount, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetHicVersion, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlInitWithFlags, (void 
(**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetConfComputeGpusReadyState, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemSetNvlinkBwMode, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetProcessName, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlDeviceGetHandleByIndex_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlGetExcludedDeviceInfoByIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlUnitGetHandleByIndex, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlSystemGetTopologyGpuSet, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlInit_v2, (void (**)(void))&__pyx_f_4cuda_8bindings_9_internal_5_nvml__nvmlShutdown, (void (**)(void)) NULL};
    void (**const *__pyx_import_pointer)(void) = __pyx_import_pointers;
    const char *__pyx_import_current_signature = __pyx_import_signature;
    /* Walk the pointer table, names, and signatures in lockstep.  Names and
     * signatures advance past each NUL terminator; an empty signature entry
     * means "reuse the previous signature" (run-length compression). */
    while (*__pyx_import_pointer) {
      if (__Pyx_ImportFunction_3_2_2(__pyx_t_1, __pyx_import_name, *__pyx_import_pointer, __pyx_import_current_signature) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
      ++__pyx_import_pointer;
      __pyx_import_name = strchr(__pyx_import_name, '\0') + 1;
      __pyx_import_signature = strchr(__pyx_import_signature, '\0') + 1;
      if (*__pyx_import_signature != '\0') __pyx_import_current_signature = __pyx_import_signature;
    }
    __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  }
  __Pyx_RefNannyFinishContext();
  return 0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_RefNannyFinishContext();
  return -1;
}

#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_cy_nvml(PyObject* module); /*proto*/
/* PEP 489 multi-phase initialization slot table: module creation and
 * execution hooks, plus optional capability slots compiled in only when
 * the corresponding interpreter features are targeted. */
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
  {Py_mod_create, (void*)__pyx_pymod_create},
  {Py_mod_exec, (void*)__pyx_pymod_exec_cy_nvml},
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
  {Py_mod_gil, Py_MOD_GIL_NOT_USED},  /* free-threaded CPython: module runs without the GIL */
  #endif
  #if PY_VERSION_HEX >= 0x030C0000 && CYTHON_USE_MODULE_STATE
  {Py_mod_multiple_interpreters, Py_MOD_MULTIPLE_INTERPRETERS_NOT_SUPPORTED},
  #endif
  {0, NULL}  /* sentinel */
};
#endif

/* Module definition for "cy_nvml".  When compiled as C++ (this file is
 * built with -std=c++14 per the metadata header) an anonymous namespace
 * provides internal linkage; plain `static` is used for C.  Field values
 * vary with the module-state / multi-phase-init configuration. */
#ifdef __cplusplus
namespace {
  struct PyModuleDef __pyx_moduledef =
  #else
  static struct PyModuleDef __pyx_moduledef =
  #endif
  {
      PyModuleDef_HEAD_INIT,
      "cy_nvml",
      0, /* m_doc */
    #if CYTHON_USE_MODULE_STATE
      sizeof(__pyx_mstatetype), /* m_size */
    #else
      (CYTHON_PEP489_MULTI_PHASE_INIT) ? 0 : -1, /* m_size */
    #endif
      __pyx_methods /* m_methods */,
    #if CYTHON_PEP489_MULTI_PHASE_INIT
      __pyx_moduledef_slots, /* m_slots */
    #else
      NULL, /* m_reload */
    #endif
    #if CYTHON_USE_MODULE_STATE
      __pyx_m_traverse, /* m_traverse */
      __pyx_m_clear, /* m_clear */
      NULL /* m_free */
    #else
      NULL, /* m_traverse */
      NULL, /* m_clear */
      NULL /* m_free */
    #endif
  };
  #ifdef __cplusplus
} /* anonymous namespace */
#endif

/* PyModInitFuncType */
/* __Pyx_PyMODINIT_FUNC: declaration form of the module's init entry
 * point.  By default PyMODINIT_FUNC supplies the DLL-export decoration;
 * when export is suppressed, only `extern "C"` linkage (for C++ builds)
 * and the PyObject* return type remain. */
#ifndef CYTHON_NO_PYINIT_EXPORT
  #define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#else
  #ifdef __cplusplus
  #define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
  #else
  #define __Pyx_PyMODINIT_FUNC PyObject *
  #endif
#endif

/* Module entry point.  Under PEP 489 multi-phase init (this `#if` branch)
 * it only hands the module definition to the interpreter; actual creation
 * and execution happen through the Py_mod_create / Py_mod_exec slots.
 * In a single-phase build the `#if` is false and the shared body defined
 * further below becomes this function's body instead. */
__Pyx_PyMODINIT_FUNC PyInit_cy_nvml(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_cy_nvml(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
  return PyModuleDef_Init(&__pyx_moduledef);
}
/* ModuleCreationPEP489 */
#if CYTHON_COMPILING_IN_LIMITED_API && (__PYX_LIMITED_VERSION_HEX < 0x03090000\
      || ((defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)) && __PYX_LIMITED_VERSION_HEX < 0x030A0000))
/* Fallback for Limited-API builds whose target Python lacks a usable
 * PyInterpreterState_GetID: fetch the current interpreter id via the
 * private subinterpreters module.  Returns the id, or -1 on failure
 * (after printing a hint to stderr). */
static PY_INT64_T __Pyx_GetCurrentInterpreterId(void) {
    {
        PyObject *module = PyImport_ImportModule("_interpreters"); // 3.13+ I think
        if (!module) {
            PyErr_Clear(); // just try the 3.8-3.12 version
            module = PyImport_ImportModule("_xxsubinterpreters");
            if (!module) goto bad;
        }
        PyObject *current = PyObject_CallMethod(module, "get_current", NULL);
        Py_DECREF(module);
        if (!current) goto bad;
        if (PyTuple_Check(current)) {
            /* Some versions return a tuple; the first element is used as the id. */
            PyObject *new_current = PySequence_GetItem(current, 0);
            Py_DECREF(current);
            current = new_current;
            if (!new_current) goto bad;
        }
        long long as_c_int = PyLong_AsLongLong(current);
        Py_DECREF(current);
        return as_c_int;
    }
  bad:
    PySys_WriteStderr("__Pyx_GetCurrentInterpreterId failed. Try setting the C define CYTHON_PEP489_MULTI_PHASE_INIT=0\n");
    return -1;
}
#endif
#if !CYTHON_USE_MODULE_STATE
/* Guard for modules without per-module state: record the id of the first
 * interpreter that loads the module and refuse loading into any other
 * interpreter afterwards.  Returns 0 if the current interpreter is
 * acceptable, -1 otherwise (ImportError set on a genuine mismatch).
 * The id-lookup mechanism is selected per runtime/API flavor below. */
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
    static PY_INT64_T main_interpreter_id = -1;
#if CYTHON_COMPILING_IN_GRAAL && defined(GRAALPY_VERSION_NUM) && GRAALPY_VERSION_NUM > 0x19000000
    PY_INT64_T current_id = GraalPyInterpreterState_GetIDFromThreadState(PyThreadState_Get());
#elif CYTHON_COMPILING_IN_GRAAL
    PY_INT64_T current_id = PyInterpreterState_GetIDFromThreadState(PyThreadState_Get());
#elif CYTHON_COMPILING_IN_LIMITED_API && (__PYX_LIMITED_VERSION_HEX < 0x03090000\
      || ((defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS)) && __PYX_LIMITED_VERSION_HEX < 0x030A0000))
    PY_INT64_T current_id = __Pyx_GetCurrentInterpreterId();
#elif CYTHON_COMPILING_IN_LIMITED_API
    PY_INT64_T current_id = PyInterpreterState_GetID(PyInterpreterState_Get());
#else
    PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
#endif
    if (unlikely(current_id == -1)) {
        return -1;
    }
    if (main_interpreter_id == -1) {
        /* First load: remember which interpreter owns this module. */
        main_interpreter_id = current_id;
        return 0;
    } else if (unlikely(main_interpreter_id != current_id)) {
        PyErr_SetString(
            PyExc_ImportError,
            "Interpreter change detected - this module can only be loaded into one interpreter per process.");
        return -1;
    }
    return 0;
}
#endif
/* Copy attribute `from_name` of the import spec into `moddict` under
 * `to_name`.  A missing attribute is silently ignored; a None value is
 * skipped unless `allow_none` is set.  Returns 0 on success, -1 on
 * failure (with an exception set). */
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none)
{
    int rc;
    PyObject *attr = PyObject_GetAttrString(spec, from_name);
    if (unlikely(!attr)) {
        /* AttributeError only means the spec lacks this field. */
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            return -1;
        PyErr_Clear();
        return 0;
    }
    rc = 0;
    if (allow_none || attr != Py_None)
        rc = PyDict_SetItemString(moddict, to_name, attr);
    Py_DECREF(attr);
    return rc;
}
/* Py_mod_create slot (PEP 489): create the module object and seed its
 * dict with __loader__/__file__/__package__/__path__ taken from the
 * import spec.  Returns a new reference, or NULL with an exception set.
 * If the module was already created (__pyx_m set), a new reference to it
 * is returned instead of creating a second object. */
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) {
    PyObject *module = NULL, *moddict, *modname;
    CYTHON_UNUSED_VAR(def);
    #if !CYTHON_USE_MODULE_STATE
    if (__Pyx_check_single_interpreter())
        return NULL;
    #endif
    if (__pyx_m)
        return __Pyx_NewRef(__pyx_m);
    modname = PyObject_GetAttrString(spec, "name");
    if (unlikely(!modname)) goto bad;
    module = PyModule_NewObject(modname);
    Py_DECREF(modname);
    if (unlikely(!module)) goto bad;
    /* moddict is a borrowed reference owned by the module. */
    moddict = PyModule_GetDict(module);
    if (unlikely(!moddict)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
    if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
    return module;
bad:
    Py_XDECREF(module);
    return NULL;
}


/* Module execution (PEP 489 Py_mod_exec slot).  In single-phase builds
 * the `#endif` below closes the `#if` opened at PyInit_cy_nvml, so this
 * same body serves as PyInit's body there.  Sequence: create/adopt the
 * module object, set up module state and builtins, initialize interned
 * constants and cached builtins, run the modinit export/import steps,
 * then execute the module-level code.  Returns 0 on success and -1 on
 * error (single-phase returns the module object / NULL instead). */
static CYTHON_SMALL_CODE int __pyx_pymod_exec_cy_nvml(PyObject *__pyx_pyinit_module)
#endif
{
  int stringtab_initialized = 0;
  #if CYTHON_USE_MODULE_STATE
  int pystate_addmodule_run = 0;
  #endif
  __pyx_mstatetype *__pyx_mstate = NULL;
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannyDeclarations
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  /* Re-execution guard: executing the same module object twice is a
   * no-op; executing into a different object is an error. */
  if (__pyx_m) {
    if (__pyx_m == __pyx_pyinit_module) return 0;
    PyErr_SetString(PyExc_RuntimeError, "Module 'cy_nvml' has already been imported. Re-initialisation is not supported.");
    return -1;
  }
  #else
  if (__pyx_m) return __Pyx_NewRef(__pyx_m);
  #endif
  /*--- Module creation code ---*/
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  __pyx_t_1 = __pyx_pyinit_module;
  Py_INCREF(__pyx_t_1);
  #else
  __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
  #endif
  #if CYTHON_USE_MODULE_STATE
  {
    int add_module_result = __Pyx_State_AddModule(__pyx_t_1, &__pyx_moduledef);
    __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to "cy_nvml" pseudovariable */
    if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
    pystate_addmodule_run = 1;
  }
  #else
  __pyx_m = __pyx_t_1;
  #endif
  #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
  PyUnstable_Module_SetGIL(__pyx_m, Py_MOD_GIL_NOT_USED);
  #endif
  __pyx_mstate = __pyx_mstate_global;
  CYTHON_UNUSED_VAR(__pyx_t_1);
  /* Cache the module dict, builtins, and cython_runtime in module state. */
  __pyx_mstate->__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_mstate->__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
  Py_INCREF(__pyx_mstate->__pyx_d);
  __pyx_mstate->__pyx_b = __Pyx_PyImport_AddModuleRef(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_mstate->__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_cython_runtime = __Pyx_PyImport_AddModuleRef("cython_runtime"); if (unlikely(!__pyx_mstate->__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
  if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_mstate->__pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
  /* ImportRefnannyAPI */
  #if CYTHON_REFNANNY
  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
  if (!__Pyx_RefNanny) {
    PyErr_Clear();
    __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
    if (!__Pyx_RefNanny)
        Py_FatalError("failed to import 'refnanny' module");
  }
  #endif
  
__Pyx_RefNannySetupContext("PyInit_cy_nvml", 0);
  __Pyx_init_runtime_version();
  if (__Pyx_check_binary_version(__PYX_LIMITED_VERSION_HEX, __Pyx_get_runtime_version(), CYTHON_COMPILING_IN_LIMITED_API) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_mstate->__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_mstate->__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
  __pyx_mstate->__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_mstate->__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Library function declarations ---*/
  /*--- Initialize various global constants etc. ---*/
  if (__Pyx_InitConstants(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  stringtab_initialized = 1;
  if (__Pyx_InitGlobals() < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__pyx_module_is_main_cuda__bindings__cy_nvml) {
    if (PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_name, __pyx_mstate_global->__pyx_n_u_main) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  }
  {
    /* Register under the fully-qualified name unless already present. */
    PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
    if (!PyDict_GetItemString(modules, "cuda.bindings.cy_nvml")) {
      if (unlikely((PyDict_SetItemString(modules, "cuda.bindings.cy_nvml", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
    }
  }
  /*--- Builtin init code ---*/
  if (__Pyx_InitCachedBuiltins(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Constants init code ---*/
  if (__Pyx_InitCachedConstants(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  if (__Pyx_CreateCodeObjects(__pyx_mstate) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Global type/function init code ---*/
  (void)__Pyx_modinit_global_init_code(__pyx_mstate);
  (void)__Pyx_modinit_variable_export_code(__pyx_mstate);
  if (unlikely((__Pyx_modinit_function_export_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
  (void)__Pyx_modinit_type_init_code(__pyx_mstate);
  (void)__Pyx_modinit_type_import_code(__pyx_mstate);
  (void)__Pyx_modinit_variable_import_code(__pyx_mstate);
  if (unlikely((__Pyx_modinit_function_import_code(__pyx_mstate) < 0))) __PYX_ERR(0, 1, __pyx_L1_error)
  /*--- Execution code ---*/

  /* "cuda/bindings/cy_nvml.pyx":1
 * # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.             # <<<<<<<<<<<<<<
 * #
 * # SPDX-License-Identifier: LicenseRef-NVIDIA-SOFTWARE-LICENSE
*/
  /* Set an empty __test__ dict (standard Cython doctest bookkeeping). */
  __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_GOTREF(__pyx_t_2);
  if (PyDict_SetItem(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_test, __pyx_t_2) < (0)) __PYX_ERR(0, 1, __pyx_L1_error)
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;

  /*--- Wrapped vars code ---*/

  goto __pyx_L0;
  __pyx_L1_error:;
  /* Error path: release temporaries, attach a traceback when the string
   * table exists, then tear down the half-initialised module. */
  __Pyx_XDECREF(__pyx_t_2);
  if (__pyx_m) {
    if (__pyx_mstate->__pyx_d && stringtab_initialized) {
      __Pyx_AddTraceback("init cuda.bindings.cy_nvml", __pyx_clineno, __pyx_lineno, __pyx_filename);
    }
    #if !CYTHON_USE_MODULE_STATE
    Py_CLEAR(__pyx_m);
    #else
    Py_DECREF(__pyx_m);
    if (pystate_addmodule_run) {
      /* Preserve the pending exception across PyState_RemoveModule. */
      PyObject *tp, *value, *tb;
      PyErr_Fetch(&tp, &value, &tb);
      PyState_RemoveModule(&__pyx_moduledef);
      PyErr_Restore(tp, value, tb);
    }
    #endif
  } else if (!PyErr_Occurred()) {
    PyErr_SetString(PyExc_ImportError, "init cuda.bindings.cy_nvml");
  }
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  #if CYTHON_PEP489_MULTI_PHASE_INIT
  return (__pyx_m != NULL) ? 0 : -1;
  #else
  return __pyx_m;
  #endif
}
/* #### Code section: pystring_table ### */
/* #### Code section: cached_builtins ### */

/* No builtins are referenced by this module's code, so there is nothing
 * to cache; the parameter is only marked as used. */
static int __Pyx_InitCachedBuiltins(__pyx_mstatetype *__pyx_mstate) {
  CYTHON_UNUSED_VAR(__pyx_mstate);
  return 0;
}
/* #### Code section: cached_constants ### */

/* No cached constants are needed by this module; only the RefNanny
 * context bookkeeping runs. */
static int __Pyx_InitCachedConstants(__pyx_mstatetype *__pyx_mstate) {
  __Pyx_RefNannyDeclarations
  CYTHON_UNUSED_VAR(__pyx_mstate);
  __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
  __Pyx_RefNannyFinishContext();
  return 0;
}
/* #### Code section: init_constants ### */

static int __Pyx_InitConstants(__pyx_mstatetype *__pyx_mstate) {
  CYTHON_UNUSED_VAR(__pyx_mstate);
  {
    const struct { const unsigned int length: 15; } index[] = {{1},{18},{8},{10},{8},{12},{12},{10},{8},{24819},{25166}};
    #if (CYTHON_COMPRESS_STRINGS) == 2 /* compression: bz2 (4880 bytes) */
const char* const cstring = "BZh91AY&SY\302\2152\352\000\n\360\177\200q\000\000\010@t<\000\277\377\377\340\277\377\377\360`\"<\332\025F'\301\244\2424\005\000]9\303 \001\"6\032\002\200\014\001\001\213Z(\000)\201\000\024\000\000 \005( \t\000b\n\305J\000\000\0005O\004\232yT\336\247\224\203@\320\000d\000\000\000%O \204D\311\251<\232(\007\250a\032\00014\320\0070\t\246\0012\030\000\t\202`\000\000$\365JU'\224b\006L\004\320db4i\246\206@\032\0070\t\246\0012\030\000\t\202`\000\000\025\022B\t\251\3514&C!\210M3I\223%\017P\360\247\350\247\246\254\253,\251I$\254\244\244\244\244\251))JI%\253\326\377\277G\371\372\377\327\354\364=O\354\276\257\311\257\360\211\307\366\212S\255\\*\373\234p\236x}\025fc\026\221\370*\254M}\2577pd\2155\276A\341\001\254\275\314\333\033\3553~\323o\006l\205@\264B\rR\020\266E~\364\254\257>\361~Sv\213\341\227\3265\327<}\372\270\272 \210;K\216;L\206SU\003#\227\347\343\345\005u\327\332\342R\302\303\341\363-wF`hI)\350\241GX\215H\324\312~\213S\352\340\262^\347\274}l\253\252\206\271\206-\362\315\351\353\321\2253\031\355\227\253\212=\"\265\"%\221\036oK\245\017\010y\203\240\213\034\257\021 
\223'f\301D\222\363\261z\010{\327O\014R%r\017\255<D\026\352\245)\326\256\025{\334p\236x}\025fc\026\221\352\252\2615\356y\273\206\242\2659\260\360\200\326U3lo\256\315\251L\2703d*\005\242\020j\220\205\262+\325\212A\255X\277)\273E\360\313\353\032\353\236=\265qtA\020v\227\034v\231\014\246\252\006G/\317\307\244\025\327^\256%,,>\0372\327tf\006\204\222\236\212\024u\210\324\215L\247\350\265>\316\013%\356{\307\263*\352\241\256a\213|\263z\370\034\230U,\254\224\225d\265\224\266\311mIY-\251)-\222JJ\245-\222I-\262Ud\222\331-\222\311%\240\214\023\307\2533s\033\357\267\305\305\036\261Z\221\022\310\217\017K\312\207\204<\301\344s\341\336Nl\215.*\267%\306_\205\360<\303\0310c\0061\364\2100%$z\324\210\302\205*$\362T\030\251\212Hb\205R1Q&\0254T*\244QR\2469\220\370\221\246\302\251\242iUQT\252\"\251\022UH\225\"\021\357DH\370\252H\2331TR\222\225[+\t*\245(IZJ\245),\225%%%mp\254R\251\014c\022)D\251RJ\252\252\250\252\222\252%R\250U\033""\"@\303]*\222\252Kd\265$\222\222Y)*\222\311RJW\222\352UI*%T\n\244U\014)0\251J\225m\244\262VR\224\244\252JRJ\222T\226YRJ\245.\226\351JRRY%)RR\333IjJ\373l\376\316j`O\335(\374J<\374\035O\3050s\252\253O\254\356\365\261\241\352\177\023k.\314tn\335\242\260\233+\363\016\215\233\333v:>t\2469\025\325\360cO\007\3324y\216\203b\022\0220Ag \017\223\232P@\341C\030\264\224\202!\213\340\262\253\316\372\034\215\317\0273JM\277\315\272x5\253t4\277e\273\253M\025;\370\327\266\262\262\311\265\266i\324\377O\255Rtp\346UJUc\021\215\na\217\357\253q%iUUQUU\331XV+\233\3141\263\243\233Ug\217\025ot\256\024mj\331\303\203\005\025%Ww\354\030M\034<J\036^{q$\222174i\350Q#\263\223M\010F\355+\272\2676LM<\014i+\315Km\214\222Lk\252VA@ 
\3068\344I!(1\000\202\253dc\035\335\274-\246\035\025\303\206\346\230\344tf\366\313m\222\243\206\216f\236\242\230)\340c\013<j\331\212\216Jw4\316V\377\370\373\237\345\261<NO\356!\211\201*\203\242\224\245+e&\0241S\025U#\241I\245H\225Q\245\023\024I\213\"[jH\252b\242tT\230\243JLT\330\251%T\303\003\017\241M\232l\255on\232I)R\212\252iR\030J\212\362ui\0336\336\334<[6\032\213l\212\341\331\3346c\221\216\024\247\370u\256u\225\210>\000q\034,%\323NT\254%2\361\222F@\214\007Q\007\202F\014w\021\\\200\204\026\224\226\361\226\351\234\334\025\017<DD`\"\3508pPq0t\013\036=\026\0268\031`\216\247$\022Y\223\222\214\014\310rz\214\321'r\211,:\014\242\004\036\006A\336n\"\"01\014<\t_\213\304A`\332\322V\304Ie$\330$\rA%/j\353\251Y6\264\314\314\312a\246\315\225J}\342\037\341\362i\272\225\320\356i\315OM\235\031\223\024\367\244\304\245QUNM+\224\342\333m\340n\366?''\332n\214sW\235U\311\203\235\266z\333\030\235\233\230b\270=,b\232i\243\226\326\324y\036\276\364\244\303\020p\010\344\307\277\225\355\325\256&?\357\210\256srIsP\210*U'Mfb\263\025\234\334\222\\\324\"\n\234\0242\203%\005\206\224\335\2074\231\265\276\365i\247W\201_\340B\271\325\256\317{\3765\016v\327\222\245R\224)U\376D<\235[<\303\023\263\304\233+\364\177\354{\035\\=\254tp~d4}\t\302\274\235^\r\322n\307\217\316\265\215\232\325\272M*\275)\273G59*\236""\301\3574\323uY\033\331kNH\303M$\306\025\245Ui\212\362{\030\323c\233\r9[k\031\226\342\264i\340\334\230\306\234\315\237A\3118z\233+\324\236\343\223c\351W\211\310\3455lc\314w4\323\253\034\212h\3631\374*i\271\370\030\2324I\022zF\230\354\306\305y;\235^]\355\346\322a\303N\t6t\233[\331\362U\226\333m\335\325;\251\320\247\tB\245\025d\313h\352\341\246\211\325[\244\314\266\252\266\333_~\232\326\265\242\244\360|\333\344\314\311''\251DG\2273\031\305\241y\035\211\316s\031\306P\262d\202@\340\3523 
\211\021B0hG\220\203\266m6\233M\241\210C_O1\322\335Uk}\364\306\216\277E\277^\255\341\346v8F\225^\204\211^\326\235m\261\261X\306;\271\316\026\267\025>\206\346\345O\237l\314\315\020y6l\3059:\252Uy\330\307'\362'\342\317W\335\214\314\307\221\260\356\014\300H\203\004\034\025\277\213nN\260W\210\354\244\231\211A\215\2260\331\351\216\352\177\213\304(\217\250\203\344IagA\007c\203\236u:=\325\361\336\036s\234\231\254\347\014\302?\024\244\222[\013\016F!\010@\"\266\351[]))KI%%)`\251JI(\243\303E\246\216G&\030\251\303\302\2567=\n\346\346\371=\244?\023\364!\374\020\301\356\346\374\374<\332\317?\032\277\264\211\021y\t\022i7|\230\252\363\264\365\274_\315\272i7c\320\252\256M1\311[\224\320\252\3644\254LH\217\254\243\022 \311\242\211$\021\243$\235\204PY$\026IT\360V\2151\356V)\242\261N\212\305i\273\206&\224\252\256Lcv\303\r4\2541\216\315+\r91\263\221\2464\341\214;1\205{\216\025\273J\254T\344\306\346\215\236\345bp\241\216lb\247\316\247U\032c\023\242\242G\362S\2060\335R=*\030Q*\221)\\)\025P\252J\346\3075iRU+\223\032sl\331;98#uF\224\211\245n\240x\323\375(\022\306Du=\305I\222^\276<sv\253\252 
\326\212!\316\271\363\343\213\277:\321\263\253\034\326\255lp\236\346>\345G\362\006\346'a)Q\372\236^d\223g\010\370\370\035\233$\251\321>\263\263\301\030\235\034\035M\342\272\237\302}&\346\347\245\351\205I\310O\265>\366\035\205S\271\242<\211=h\352\"uT\024\251\"r\037\350\215\334\237\212#g\337\3114{\037\212'\362r'\314\346\366\017\310\331\375^'\r\037;\361x$\3706G\241\347\262e[&Y2\255\212+g\350|\230\331\2627Y\006\366\324s=\307\375""\363\325S\245\312\252f2%UZ\252Sr\245\025\311\204i\016f${\341\352<RN\243\326q\350z\317\274\217\256\254\250\372\237\263$\325[U\021\303\017#\305>*4Q\370\030a_\312\236\327\373D\376\242|}vZ\335\024\301iK)\260\000\005\252ISZ\r\254\000\001\240\n\222MAl\226\322\225\224\266J\311j\244\265IZ\2206\r\240\000\262\226\226K,\262\312\312\225\226R\252M\266\000\224\262YJR\272\353\245))Ie$\245IZKk%\244\264\226\333I$\222\222IIJ\225%\244\244\222\244\222\222ZZ\244\244\257_J\222a5-\226\216\t)\245~\304\377J3\246\237q'4\367\244\216H\346\222Z\222y\337\357\344\235H\325\267\300:\030x\211\3711\035\334)E<Q\206\013\257\315\205\235\376\235f\262ir.\330\302\315\266\326k&\227\"\241\006\352#f\314\253J\331\261\204\331S\271Rp\334\3414\331\242L\266Z\374\223\233d\234\336\266\217:Bn\237f\022>\247\2019\036\223f\2227}\246\347Z_\007\230\322r\232$\330\343\341\313\307\276\332\265\362Mx_\225\025\177\272!\026*lD\256\246\347\024\260i\025\316\363SBj\221\t!io\17728\267\\\\\302G)\256\026QW\270\204X\251\261\022\27178\245\203H\256w\232\232\023T\210I\013K{\345\034h\304\215\207D\234\330\256f#O\006\306\216\n\223\025\262*M\344\333\014\255\262[i42\331+Ii4\254RT\261\013l\244\205PL&\3120\304\355\264&\335\0301\241\220\"P\231\002\n62FP\330F\312;\244\307&\243\266\262\334\314\264\251[\253\030\242\212L)*\274\n\216\032vn\331;\033\030B\252ISu\313xnJ\244\307Etnt\034\233\036\000\222\010$F\332L:\343b\020\231\207\301\337D\233\031D\006\035O3\025\321\263\025LSM+v\315\2324lb\267l\323J\252\2551\211Za\243f\215\2336V\232i\263F\230\331\205!\262\242JRITLQ1H*\212\242\225'b\223JI:8UW7\334\374\224\352c\251>M\215\204\373\216\23
4\337'\363\177\327\007\230u=Qb\305\362B\r\017\330Q\362y&\016\n\234\222\235\224\323\322\3414\320\354I\302n'a\317\305+\324\245S\326\235\036\322\244J\225\272;\035\017c\022\212F\351&\311\371\241\301\356p\037\3319\023\346\035\023\263\020\250(\266\331m\261\351\016\250\330\244\376n\262\254\335\301\363[j\274Sd\323\263g\221\243\344\242\207\324l\212\303\220\331\375\rI0\264h\303\232c\024\256\0076\307\245\335:\222l1\366\236\263\204?0n""\177C\340\370I\327\275\317W\217Og\257\3072\253TBF\224'\252\275gY\225Z\242\022$,\200\332BXEh\331\303\231]\032snl\274\355\344rz\305H\256\204\230x\205q\324R\2337\037I\311\363\325\347\214\253\213hE\264\025Il\032\301hCRl\026YT\222Z\014\233\021\021\253&\373\303\\+,\253y\307A\265\203jH5i\307W\r\254\227K\\\002\326\rA V\352V\335%*\202\266\n\000\252I-\010in\225z\227\277mM|\003\2220\334a$I\331\365\034\022\0210\237\360z\304x\276a\372?\005{\341\356='\340M\222{\016\217\301\023\202`\220\366\036\t\210\322M\331\272\252\3110S\025\212U\"\252\037CO\301<J\030\0259<\024\354\234\320\203\271=\312I%*\252\020\344us\253M\332rQ\332}6Z\245=lD\367)'\332x\267&\306\306\350{\234$}\r\236J\344\251\034\364\352ls0|Ok\324{\321\010\236\322W\275\300\367\217&\030\247\254\177T\361H\374\023\301\335#\305\360I\347B\017\026\317\234\304O\274\206\210|\312z\003\220\362r=e\376\236\217A\273\233\351=g}KeQ\244\3310x\2714\320\350x\223a\346\"\251*\224[\357\337\266\253\367\312\325\267\242\364\372\310\"LM1-\253Ue\253\025\n(J\225Q\344\352v77G\244\372\207\334;'C\322\2371\350:T\264\352\221\212Fffg\245\330\366\017B\033{1rY\231\230\271,\314)<I\302&\022=\311=I\350r9$R\212Z\265M\306\014\n\225\2052\034\024O\263\301k\260\207\330\244\0211PUD\221\205\022{\205\000h\244\021\261=\311'\255\3660\373\007$\233\245=\335*\331%w1\271\357\332\330\233\"\251&cD\346M\221\262pT\215\367\266\264<\035\266{\303t\222l*9\223?\023\010\234\221\365%t\221\3218I\335\264\331\247\325\2554\327q\263f4\331&\023JIZ6y\025\346U7*T\252+\204\323\207\334\370prEM\216\035\354\266JTh\221\311\203A\342BI&\237\333\324\371\321\310\206\305C\221\034\332$\216
\205H\237\2156\025\023\250\244\237z\2464E0\245\027q\"LUJ\254\257\325\006\310\360\372}\311\373\233\010\235\212\221\017\346\247ni\037\371*4zI\303\010\355\366\241\006\2227y\221\034$C\025\033\232JA\366'2M\224+I\007\222V\233\272T\264\346M\320\203\003\201$\224\326\255[k\204\211&$\233*#A6\034\331\253\276F[\231\030\257\245$\365\371[\211?\nRYeo<\000:\356\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000""\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\016\256\340\000\000\000\000\000\000\000\000\000\016\231\230\032ET\204\245D\221H\232\3263-\267O%-^x\000\001\325\335\326\356\000\000\000\016\253\270\353yU-\267\236\035m\257\"\220\302?\332\225RB\252\020\376\025\037\302\222m}\262\333\356^\211oO\207\225\335\327w]\335w\000\000\000\035w\001\345\274\227\236\000\001\327w^[-%^x\000\000\000\000\000\001\325\345\265)-W\236\000\000\000\001\326\362\311RY%~\353j\366\351BOa\375\307\346\372I4\222`\251\037\315$\237\370P\237p\225'\277\204N\005\016\207$|\032\037$>s\362\037r?\007\364pN\215 \236\324\211\331RI\346)\032DvLrR\253\270l\036q\311\032?TtuI\360Tuz]\037\320\3029\233\273\242uI;<O;\364*T\356~\303\203\231\272+\262'Q*~G\2748l\3669\236\t#\365c\241\363's\357$u:\237\271\370>^{-B*\221\351y\304\247\346\216Hl\377\216\263\263\301M\310\360\016\250\333\245\265H\341?\232G\322\303\223\2521\363\016\211\362E)U+*\226K,\266V\222\251R\224\226\244\244\244\262\244\245J\225-Ke\224\241B\204\250\241\024\252\262\254US\354pI\037w\245\330\317I\204\357b\330\352\nF$\303\022\250\223\033\230iRF\333[*\261)D\252}\177\271^\034[\352l\212\232\177T\370\016\356c\320\244\224\352\375N\250\322UET\360<\204\356wO\004\246&\034\334<\225\3551\017i\325\330T\251\340~\345\035\317\023\017\213\322\211\330r*x\236bH\223\314\2216\016Ct\356\247\212'\316\341\347p~\256\207\"\247\352\211\235m\210S\311R0\354+\022\225*\207%|\021\351\347\265or\373{\376\372\335\305\326\374j\233\366\373\334\241f\206\341 
cj\377\234\275\242\210)\246\364#3\266\250\225\371\240\250D\246\253Z\212-E\\!\275p\345\01347\t\003\033W\304\275\242\210)\246\364#3\266\250\225\304r0\321\203'\270\311%y6\r4\2300\302S\273M7c\016\352\303\251\211\212\230\354x0\323\neZ;\037\024\363\260r6x\023\360\030\212\375)\263Hln\204\024\362\022\212(\246\236\223\332\235\023\032$\363\224\2159\006\315\233)v(\330I'\261\030l\224\370\242M\321\244\247RK\020b\317\230\2028J\334\010\3733'It\320\360\355&\340E\336\3166\315\362\262w\022$\331\241\212U""\024\222\252)YKi$\267Kt\265\244\266\224\267K\245\244\254\245I%%%\256\225t\224\255\226Y-)I%R\353\245\322\030\2121\214E)U\025JUJ\225-J\322KKI%\224\264\226JX\025IJ\211QAJ\222UIT\245UU\025\024U\n\241J\221U\002[%\265\222\266KiJ\222\332I\"\252J\251\022\233@\374\252\330\331H\321L\266\341R\252#u!\212*\211\265\226\311\"\325\323\020\241Hl\244\233\026j\334V\022m\245\243\022\016jb\211\024\246\314b\024\230\2510%T\223r\2224\315\364\306\34764\243w\003G\261Z\222j\313]\3214G\230\340I'\304\"\244\301\210\215vn\220\2456b0\245*\"\224\2344\321\245TU\024\221\206\216gS\232V\024\252\252aU$\235\022\245G0\256H\346\245v\364\232=I\324x#\273cHx\036\244\207RNcx\366S\275\267\265\215SV\335Q\245K\025j\325\244<\310\330\322\247t\335(\357^'3\006\035\006\026s\351\225L\314UfeS3\025e\025\3614)\330u\335i*\215\211\"J\221\346\021\331$\234\324\201:\217\237\342\236taO\372\2250\237\037\266\337S\346\222N\320\374\335\277KbW\344\237\252#\372\252!U\tT\212\241UT\222\250\252\2074M\025$\366$|\030y$I'\222/\242\313m\245[-\266\227\305\260h\304\320\363\036N\033\276\365$\217#g\316\237J!\344I\354{\037B\235\3254}\3420iP\321!\023p\366\220\222I\363<\203\204\374\217\325\210NC\320\222}\270x#oP\372\363\013Ff\034;\247\254\306O\276\332\232i\3314\242\232*i\215:\016\030\372\212p\322\275\212\352\323\030\304V?\023\365$LG\265\022}>\260?\376.\344\212p\241!\205\032e\324";
    PyObject *data = __Pyx_DecompressString(cstring, 4880, 2);
    if (unlikely(!data)) __PYX_ERR(0, 1, __pyx_L1_error)
    const char* const bytes = __Pyx_PyBytes_AsString(data);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (likely(bytes)); else { Py_DECREF(data); __PYX_ERR(0, 1, __pyx_L1_error) }
    #endif
    #elif (CYTHON_COMPRESS_STRINGS) != 0 /* compression: zlib (5379 bytes) */
const char* const cstring = "x\332\355\235\311v\334:\222\206\365(^\226\357\271\233\353\356}\035K\266duIv\226\322\222\273W\377\241HHb9\223Ls\220\255z\372\0028\202@\004\206\224W}\350\205m\021\021\001\022C 0\351\373{\272\313\013\201\274@S%\251\270O\322\357\300>\221?\313?\3732kwB\375\257H\366\335\277\207\227_H\223C\256\376\377\243Mv\375\363Z4\231xH\332]\0034\242\226\177\247OI\365&-\213\272y\363\307\337\212\347\375\356F4m%3y{\242\377\364\346o\235\340\037\177\276i\213:\177,D\366&/\244\014%4X\373\363\215J\373 \236\363T\310\264?\244\260!--t\217\227O\325Og\345\376\3206\342R\032J\n\245\3766D\250\317\321x|Y<\224}\366\224\205\342a\020\277\020\315?\304\313M\331$M^\026_\237*Q?\225\273,P{\373:\355\227\272\021\373\263\344P\007\013oeN\202\223\036\313\\\225\370\311\211C\340\3177}\255\276\r\220Y\326\3741:\001\257\303\267\010Cf\026\365\311\366\215\342<\027\273\354.\331\265cSti\251\237N\337\337\374u-\366e\365\002\357\307v\362URd__\016\"T<\317\036\305\331S~\370\224\213*\251\322\247\320|\332:<\227\263w\351u\231\365]\340\371\257@\235\335\367\353\262P\355\253\255C5\312\364\373\227\207\007\351\\\002\013\267\323\350?C{p\231\251\037cZ\031ikS\253\276Q\333\266^i\333T\016\370\314\276\307\252*\010,I]!D|v_\207\366}\243<z\347\200n\304\241\254\232\320<u#g\242j\362\207<\r\360.\226\272\354.\333\374\337N\207k\352\226;Q\205\313\367?\275\3172\351^\353\274x\014/\330ASv\263\274\021\251\224\213\323k\232*\277o\273F\025\256%\375yr\237\357\362&\217\324k\253J\024M\327\366\316+\361#Jy#\252\007U,qJ\345OQ]\313\252\253z\325\321U\004\252\337=\312v3~\354\313\230\366\261H\356wb\030\247\336\036m*\266\313\352\361F\250\354\237\261\306\373\237dw\331\274\333\250a\373\262\310\304\257\345\343\030\007j\231\375Z\036\312]\371\370r%\236\305.\320F\225\354?\026i\365rP\016`\354S\256\264#m\016\321]\210v\376\334\267\250\235\376*\213\207\221V\314x\302\247\3741M\317\312\266hD\325;\361 
\215\255\372\352\252*\253\230*\034\364n\213\374G+n\213\264\224}8mD\326\031\352\336!\324\220\336m\372a&`\2441\2648SQf\336Fg\034]=\262\234\262\261n\342\273\372\371\351\231\3128\260`""\317\223b{\020\"\013o\376\027\207\3756\331\037vA\rG\t\267\207\360aW\366\362\017/rR\226\247C\274\022\363^\355yr_\345\251R\271\013\326\371r\220\201\246\352\310\343\260\311\247\034c/H\307plv\204\026\353\302\245\315\273\274j\344\0047\377w\374'P\252\001\2122v\277\222?\027\265j\373\347\"Q\361D`+\374T\326\215\032\344\302_Sit\223\367\340QY5\213\252\334\177\271\377\227tA\374d\314g\346:\251\036\363\342\253\330w\365\034\0342\365\223\246\316\357\351Q\271\351\212u\341\2532\355*`\321 6\377\207\253/\237/\272\277~k\276\257\314!\246\024\360\374.L\374\363\363U^|\017w\001\275|\027\267u\213\r\241Z;\251u!\232\323\237\341\355\257W\332\036\245\324;D\221\365\252\201o\271\271\271\376zu\027<_\335$\217B\246\347\225\330\253\2509ik\341\234\372\305\326\372\353\354\277&\3434W\r\342\343\257\300\021e\220\017\026\026\267M\276\033\372\3061\303\357&\313\003\363\332%\215\364H\373\210\267S\r{\273/\313\346IN\3636U\371\220\357Bc\230\245n\320:\231\256X\266U\032\243\320\257)\205\366s\371)\251\234\273~\020M\222\357\256\362\272\211RS\0256\214Uc`b\257j0-.8\023\241g\023Qe\343\252KX%\315\322\261\303\322\2158$yTl~#j9\177W\321\370\373\315%\033g\277\336J\200\215\362\347\215\330'\0079\242~\222\225_>\312)C\327\200\002\277\244\253u\331\252\235CY/\332\231\035W\n\3518knC\376\234\2650`Z\335\256#f\316\254~\254\327\211\216G\264\326\034\246\240\302\263OB:\305\362Q\024\242l\353\360a\217S\r\2363+\003\343\306\305q\375PY\330\250\000S\272\230,i\022\034\265\360\331Yy\225?P\026\266\351\223P\373RU\374R\330B]F,\021\035u\241zU>\036\241\265\215\316P\365\264~\375z*\276]\222v!\303\354\346#-\305V\332\267\262\372\276+\223\254\033\227\206A\363F\374\220\336E:\255\341\347\200\342g\003\026\317r\200?\002\332\225\305\343Q\271\037\2412\370\347gY\001[\265/q\264\211\000G\252\367Zr\214>*s.'W0\020\230\321\264\203\341\372\023ni\261\232\263Uk\363\241\236\202\262\243\226u^kc
(\243\331@\264\005\275N\217{\007\2753G\277\002[U\307\033\211s(q\266\216\267\3756\346\243\372\202}\237\246j\3262D\367u\\\355\304-1\331\232Qq\243\255~\236\024ge\321T\245\234=\354\362""\364\345\367\330\210\265\020\274\262I\251N\335b\032\341\360\252\026\340\264\2738Krql\227\264\263\350G\303~c\374]d\377d^\346u\372\277\241\374\276>\211j\237\354\344p\247\372\306Q\375\342\262h\372\245\245^(\354\244\201m\246\267q\374F\036gq\336M1\226,b\326Vl\343\301+'\266\352$\031\245Ix\367c\225_S\260\246\037>B3*\244\243T\007\227\376\360\220\027\262\241l\323\262\337\247#\014\316\261\034\235\241;\326\233S\265\310\360\0039A\242\324\027\275\325+0\216SM\376,\364@\302\325\312\010\003\027Gkv\373\350\225\220\237'\007\253\311\237\036a$x\"\034\241\357\230\r3V\364\351\334\021\0371\317\353\324\260\375j\003\221\312}\014\030\221\253\355e\214#\215\321\003\357k2X\214\300\326\221\313\337\223\037;\200\036c\350\325\005\362\033\325\351\322%\202\017;\017\317\240\244%\217!m\223\244O\362\315\246\315\370q\235_\215\234\216%J\313R@\210\353\326\2414\2063\256\213#\263\3160\245W\350\217\177\250\303\225\351\323\234\243\317[\367\272\243\313\357\034\341\270\022\342\316nT9\257D\234\302\267$o\202\024n\304c.\377\255:E\217\306\355\355\345\007\263\007\272g.\267r\020\035\253\376Jdg\345\256\254\350\3720$\235\265\255\313n\266\267\256f\251\213\252\377\217\247.\352P\205\020\343\361\207\006(\315\220\216\256\374w\337\004c\203\331\345\372\201_\"\346\230u\210&\035\317\205h\316#\327\335\376\322y\244\331\264\026z\350\210\322\363\035\346\241t\324\263\376 
\206sP\347Tc\227\3109;s|\225\305\276\302\215Z\307\330\273}-\247\253\257-\371\265\"\246J\254\252k\266\342V\212]!\014\261\026\266N\350\263\024_\355\257_\tsY\214\225\217Z\022\263[=\277e\244\247\250!;i\362\321\377\361\306\347\315\013\316\233\035\245\350\317n\232\364\034\341\246m+\352\311iR\375\345kX\304^\215\275ir\244jt\227\3457\216b^\342\310%\206\321\332u\362\31379\350F\026Q\325\216\rYCD\033\221x=j\305\334%1\234\256\373y\237~,\032\372X\227{<]\026YH\263\215Y\207&V\236\177\245\2736\023Y\257\3035M[\257\217}|\222q1\325s\231gC_\036\216JW\322\367\234\314a\356\205h\372\270i\250\261e\322\347\273\353+=\241\317H&|J\212l'N_\344\374\342\264\255e\323|~G\013lE\225';:M\205\315\313""\374\316\332,q\274\216\225<fkL\334>\250# \345\013\225$\255\014\3639\303\364<\341\341.\010j\n\333X\205e\016\3726;+\324\305\032Z\301\235\355DR\315\343\307&\317j3\365\354\320\216\013tZ\322\215\250E#\347\233We\372]N5\324u\225\332L\236\316wR\022\352kI\313w\311.\317\206\225\232\252\334/k\371\242>\234\347\325\376gR\t\262\t\351#\231\326,\247\364\3232\251\262MR5\237\333\375\275\250\226\211C\216\227\373\344\221\266\3769\331\213\345\023\252%N-pzrw\237\2275i\361\342\220\236\355\276\337=\364\027\350\226i\262\370\3304\325h\315z\177\241L_\347\205\364\312\256\014X\211\256\366\347\253\223\365R\231M\230\357NN\315uNS\367$\315G\343]HB\274\277\356h|\372|\255\361\316H\321./\232)\323%E\263\r\322)\243\216Q`]Q\r*\033\263/\215:\366w\214JtJ[7\345^T\247eYS\231j\327\002\215W'\023\226\375\235\272\031\350\022\326n\000\262b\332M?Sf\274\321\267|\276\274\263g\244i\267\362\214\224\371\336\2359\016X\256N\353\027\306\3659\303=\210J\235EU>\273;\223l\244.n\302\341\371\257ey{\222\027\001\250\365b\356\324\376?\375\020v.]\320u\376\250?\322\204\277\250\363\036\242sc\306\313\217\267\316\226\217\307;\027\262\356\366e\361^\235,kJ\303\357-oz\331\r\315\231>\215\365\375-\255EUm\027i\246k\371\230\246\332\005)\303\257\252,\207t\352\263\350\353V\016\203\363\030G\274\177\377K\021\244:\221\226\327\207]\362\322\357\346\220I\266\316Fyz9\366\016\255l\231\330\237
&\317\324\301\362z#\212l\230\265\231\037\370\276m\312\316#\214Cg\277\264bT\272\375\312[\307\247n\203\314n\031\263\374Wm\247\"\014|\353~%@u\207\324\032\266\306\245 \343\351\260\222x\263\271>Y\234a\270\030\206,\371\340\237\255\250^\2067\352\357\036\030\365e\335\2702\007L\375f\225\225\266\270\361\264\374zG\252\326\005?\3136/{\237\0246}\203u\021\311\360,\256t\365r\364\225\244\347\3776\"d\355\366\221\2352\3342Z\346L'\314\241\022\031\323XW\207\254\250c\274\2523l\346\233\305\325$\273\261+\327\354(\264\251\312\246\353\365\275\275\333Zv(*#=0\237\003\271\351\306\315\362{\365\343\017W\345\317\371\212\215\212\276M\023\352\222K\177\267\3054B\246L:\346\305\230E\330\234d\337*9""\036nn\256\2552\327=\007\237b}\353|\223\204|\216\347\377\262\236\253S\332e\373\370$\313\331H\313r\343\201v\273\343\304\270S>]\303\350\234g\242\252L\315\n\206\r0V\372\366\220\231\2622`O\366\254\302x^\227\030\314\373\013\035\3240\236\024\262\260\372\003\272{9C6\013\355\246-\212\376\362\311\362\306\206\221\207u\030\223L\267\016Q\263A\t\373\035\366\363\251\025\231\352V\333\230oL\030C\314\346r\274\326\260|\363\255#MY$\3561\030\357\326\271f#\322\246.\001\030=?F\302p\320Z\t\337\330\205@\036F\260\3037\227\310`\205;\237oK\216\323P;%\254Y\260g\350\035bD;1OP\330_\355\323VS0\3738IM\013\316\315r)8\246\367s\236\251\0245#\324\001\366.N\264N\261\363!\177=l\254&uY\324L\177\t\026\354D\324;\327\324\000U\210JE\325E\335\356\017v\037\271J\352\346\364\364\177\317wm\375\3645\337[3\267i\030\273-\016\332@\246\346U\213\341@\333.&\n\234u<\364Iq.\">m\037\036d\355/\363\356\346m\311A\205\275\323\334\225\362\"\335T\3442#\347\254\323\371\245\314\256\262\361l\302\205\354m\025\361z\272\314\267<k\236\314\030R:\275T\364\255\245\363\341t\300h\205q\334;\311$\3310}o\325\375\256\0172\024Ru\232?\266\275\322\331\223\220m\2545\334\342e\365\343\263\371L\3469M\366\272p\310J\366\275\222&B\224S\037\004\235\2665\225\226\027eE-\204]w\277P\316,\241k\031\340\347\262\230\210\311\2474\"\013\274\266\036\252\025\205\262\022vB\362Y-\341dv\344\241>C~\021Qw*U\016fr
\306E\206\215*\231\322Z\216\366\303<\205h0TX@H\230q\346eM\315\325\307\331\310\345\303\266\221\236s/;\231k&4\354d\326\366L\210L\030z\3272FQ\221\237\021\311]o\266\241\242\363iE3\257\321\361\023i\223\247$\322\3305\345n\276\335}5\353\231\206Ra\323\265E`{\220\376\237\303#\253(\253\312V\350\027\365h\177\361\345!\341\203;\242\301\250\361\240\251\222\334^\317\030\213\252\357\217\326j\270\326\026\354\311\357\020me2\360\262\326\330\355\243\321593\037\277\317Z\240a\333\275\232b\253\310\332\325~\215\235g\253\264\365\216q\372B8\317\376\365\211\300\303\272\346`\276\267)0\276\263\314u\2712@9\377M)\373\325\"\232Y\332\357\017\215i\n\337\362\346i\022\346\315\316g\373\344\327fw!""\202w\256lY\003\312<\233h\306+\313\233\001T\005\310\026&\203\220\371\330?%\303\3553\330-\220\317a\232s\032\225IvL\272\311.?Uu\215\\\2716z1\251\317\233\\\245\370\232T\217\\\2300\365\326\213*9<\345iM\355^\261=\212\337\025\333z\366\304.\270=\261e\202j\214y\321\235\226\247\006\373\205\376\362\030\344\342\221:\346\270x\240\2161\216_\243\225\261\276\361\251=&F\215e\3524\245Y>\366M)lizZ\246\311m\003\345\270\351\220CdvMv~n!z\3604\004\214\263\301\226[2u\372j4\324l\347\344\323\360\277\312\344`\234R\001\337D\367Pw1\320\336\361C^\247\345\263\250\214\005T\345Y\236\225\323\\v\304~5X\215\305\346\350\"[F\376`&\221\233\345\272\3074N6\364\207\222O\270\363\306'\364\251\342\023\372\354\360\tsB\270\346\2178\364\005\243NwH\033\343\021\336\351\331\005\375lS\267S\247\034\036\351\277\030M\177>\236\306\325\237\365/R\353\217\314U\336\241\373\030\247f\307E\013}\001\205;y\240\313\364\276^\r\032\031\225v\235\211\347i\263\335H\343\236\337\355\355\243 
\226\010\255J\354\253\030\022\372\276\211\221\264\330\3260\322\364S\254C\0336\277T_K2\322\264\003\250T\262~\310t\232\342\0332\323\220o\276\364\375<\3351?\225\330\3031\265\253d/\324b\334\034S\332\225kL\307\351\222\321\326\035\350\327X\314\223\210\222w$\373\233\240\025\230\353\351\333\240\242\240\342\344qSX?]i.\225\235\355\222\332Z?\233N\241h\317\206\202\262\364\251\0368\244\251s\032S\017\327\236\237\337\253\211E\365<Oy\364DU\247\367\313\365\"-Y\316\314>\211\344@%\r\323\345\017\244E\273\225\214\006\355\2309s\255'nd\317\336[\005\326\356\207\275Z\371r\231U\020\362s\313]K}\255a\331\014\314\2070@w%\027\366#m\332.\233\300\024_\311v\2638\3227/\376PC\221\032\363\324\346\320\0131bY\373M\203o&,~\312S\375\325.\245\240\n\036\316w\311b\224\333\206\345M\355uMY\rk\r\366\221\251\305l\220-\216a\0165\315\030\207o\262\247\222S\206\3436\253\272\0045L\375\324\367M\207\362\236\332&+\177\026+\362`E\036\254\310\203\025y\260\"\017V\344\301\212<X\221\007+\362`E\036\254\310\203\025y\260\"\017V\344\301\212<X\221\007+\362`E\036\254\310\203\025y\260\"\017V\344\301\212<X\221\007+""\362`E\036\254\310\203\025y\260\"\017V\344\301\212<X\221\007+\362`E\036\254\310\203\025y\260\"\017V\344\301\212<X\221\007+\362`E\036\254\310\203\025y\260\"\017V\344\301\212<X\221\007+\362`E\036\254\310\203\025y\260\"\017V\344\301\212<\370\177\206<\200\311<\200\003z\000\226z\000/\366\000.\356\001x\360\001<\344\003\370\321\007p\260\017\340\206\037 \232~\200h\374\001\202\370\007p\003\020\340& \300\201@\200\207\201\000?\004\001,\005\001<\006\001>\016\002| \0048I\010\360\242\020`\263\020@\301\020`\323\020\300\343\020\340\342!\300\005D@\000\021\001~$\002\374L\0048\240\010`\251\010pa\021@p\021\340\002#\200$#\200G#\200g#\200\207#\200\247#\020Zf\341\021|\0040\200\004\360\204\004\004 \022\3002\022\300B\022\020EI@ 
&\001!\234\004p\240\004\270H\t\340Q\tp\260\022\300\303\022\340\241%\300\215K\200\207\227\000\0170\001nb\002\334\310\004D1\023@C\023\300P\023\020\202M\200\217\233\000\0378\001.r\002Xt\002\334\354\004\370\340\t\210\246'\300\205O\200\213\237\000\007@\001,A\001N\204\002\002\031\n\010\203(\200\244(\300\205Q@\030G\001$H\001N\222\002\242P\np\262\024@\303\024\300\322\024`\341\024\300\363\024\340\003*\300ET\200\033\251\0007S\001^\250\002|T\005\370\260\n\010\344*\200\007+\200%+\200E+\300\305V\200\007\256\000\037]\001.\274\002\242\370\np\000\026\300\021\026\020\201X\000\317X\000\017Y@\010e\001<f\201\367.\216$\373\2335\322\0028\324\002\\\254\005X\260\005\360\264\005\304\341\026\020\315[\200\027\270\000\226\270\200 \344\002\302\230\013\360A\027\020F]\200\033\273\000\216\273\200P\360\002x\362\002\\\350\005\270\330\013\360\303\027@\322\027\340\307/\300\317_\000\017`\200\233\300\200\000\004\003\002\030\014\010\2070\200\2470 \034\303\200@\016\003| \006\270I\014\360\241\030\020\314b@0\214\001.\032\003\216\3021 \220\307\200` \003\274D\006\204 \031\340f2 \002\312\000\226\312\000'\226\001!\\\006\370\301\014\010\"3\200D3\300\303f@\010\234\001\036:\003|x\006:\004\265\203B\366\305XB\003\010D\003\302\031\r\240 \r\360P\032\020\200i\200\233\323\000\027\250\001,\251\0014\252\001\016V\003HX\003X""Z\0038\\\003\334\274\006x\200\r`\210\r\010F6\300\317l\000\007m\200\213\332\000\017\266\001Nn\003Xp\003B\311\r\210@7\200e7\300\005o\200\213\336\000\027\276\001>~\003|\000\0078\t\016p!\034@2\034\340\2028\300Eq@\024\306\001\001\034\007\360 \007\360$\007\004\240\034\340c9\370\003\355y\206O\320\034\340\3049 \204\347\000\026\350\000/\321\001n\244\003(\246\003\342\240\016\010\246: \030\353\200 \256\203+g\336D\367\213\327\301\243\035\340d; \004\356\0007\335\001>\274\003B\370\016 \001\017`\t\017\010G<\300\301x\200\013\362\200 \312\003X\314\003\234\234\007\370@\017`I\017\010A=\300\305z\000\001{\200E{\000\205{\000\303{\200\033\370\000\206\370\2008\344\003B\231\017\010\205> \200\372\200 
\354\003\202\270\017\360\202\037p\014\371\001\361\350\207 \225\200\267\231}\220\207\376\200#\360\017\210\343?\200\001@\200!@\200G@\300\301\200\200\027\002\001\236\002\001\026\003\001\206\003\001\006\004\001\226\004\001\007\n\002\024\013\002\024\014\002\004\r\002\034\016\002\024\017\002\004\020\002\014\021\002n$\004B\230\020pA!\340\242B\200\303B \200\013\001\016\014\001?\031\002\0164\004\\l\010\370\341\020p\321!\340\301C \204\017\001\016\020\001\007!\002\001\210\010\370\031\021\360B\"\340\245D \000\023\001\017'\002~P\004|\244\010\004\240\"\020\300\212\000\013\213\000I\213\000\205\213\000\303\213\200\013\030\001\216\030\001'2\002\036f\004\034\320\010p\324\010\270\261\021\010\341F\300\013\216\200\207\034\001\036\035\201pv\004\010x\004\010z\004h|\004\034\374\010\004\002$\300\023$`!$\3001$`C$\020H\221\000\217\221\000\313\221\200\033$\001/I\002,J\002.\226\004t\230\004t\232\304\177\000\276\357\331\027";
    PyObject *data = __Pyx_DecompressString(cstring, 5379, 1);
    if (unlikely(!data)) __PYX_ERR(0, 1, __pyx_L1_error)
    const char* const bytes = __Pyx_PyBytes_AsString(data);
    #if !CYTHON_ASSUME_SAFE_MACROS
    if (likely(bytes)); else { Py_DECREF(data); __PYX_ERR(0, 1, __pyx_L1_error) }
    #endif
    #else /* compression: none (50072 bytes) */
const char* const bytes = "?cline_in_traceback__main____module____name____pyx_capi____qualname__setdefault__test__char const *(nvmlReturn_t)\000nvmlReturn_t (char *, unsigned int)\000\000nvmlReturn_t (char const *, nvmlDevice_t *)\000\000\000nvmlReturn_t (int *)\000\000nvmlReturn_t (nvmlComputeInstance_t)\000nvmlReturn_t (nvmlComputeInstance_t, nvmlComputeInstanceInfo_t *)\000nvmlReturn_t (nvmlConfComputeGetKeyRotationThresholdInfo_t *)\000nvmlReturn_t (nvmlConfComputeSetKeyRotationThresholdInfo_t *)\000nvmlReturn_t (nvmlConfComputeSystemCaps_t *)\000nvmlReturn_t (nvmlConfComputeSystemState_t *)\000nvmlReturn_t (nvmlDevice_t)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, char *)\000nvmlReturn_t (nvmlDevice_t, char *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, char *, unsigned int)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, int *)\000\000nvmlReturn_t (nvmlDevice_t, int *, int *)\000\000\000nvmlReturn_t (nvmlDevice_t, int, nvmlFieldValue_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlBAR1Memory_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBrandType_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBridgeChipHierarchy_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBusType_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlC2cModeInfo_v1_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlClkMonStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockOffset_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, nvmlClockId_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, nvmlPstates_t, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, unsigned int *)\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlComputeMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlComputeMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeGpuAttestationReport_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeGpuCertificate_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeMemSizeInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlCoolerInfo_t *)\000nvmlReturn_t (nvmlDevice_t, 
nvmlDeviceAddressingMode_t *)\000nvmlReturn_t (""nvmlDevice_t, nvmlDeviceArchitecture_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceAttributes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceCapabilities_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceCurrentClockFreqs_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevicePerfModes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceVgpuCapability_t, nvmlEnableState_t)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceVgpuCapability_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, int *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, nvmlGpuP2PCapsIndex_t, nvmlGpuP2PStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDramEncryptionInfo_t *, nvmlDramEncryptionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDramEncryptionInfo_t const *)\000nvmlReturn_t (nvmlDevice_t, nvmlDriverModel_t *, nvmlDriverModel_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDriverModel_t, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlEccCounterType_t)\000nvmlReturn_t (nvmlDevice_t, nvmlEccSramErrorStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlEccSramUniqueUncorrectedErrorCounts_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t *)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t)\000\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlEncoderType_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlFBCStats_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlFanSpeedInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpmSample_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGpmSupport_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuDynamicPstatesInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuFabricInfoV_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuOperationMode_t 
*, nvmlGpuOperationMode_t *)\000nvmlReturn_t (nvml""Device_t, nvmlGpuOperationMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuTopologyLevel_t, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuVirtualizationMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuVirtualizationMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGridLicensableFeatures_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlHostVgpuMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlHostname_v1_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlInforomObject_t, char *, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlMarginTemperature_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, nvmlMemoryLocation_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemory_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemory_v2_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvLinkInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvLinkPowerThres_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkGetBwMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkSetBwMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkSupportedBwModes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPRMTLV_v1_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlPciInfoExt_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPciInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPcieUtilCounter_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlPdi_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPlatformInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSmoothingProfile_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSmoothingState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSource_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerValue_v2_t *)\000nvmlReturn_t 
(nvmlDevice_t, nvmlProcessDetailList_t *)\000nvmlReturn_t (nvm""lDevice_t, nvmlProcessUtilizationSample_t *, unsigned int *, unsigned PY_LONG_LONG)\000nvmlReturn_t (nvmlDevice_t, nvmlProcessesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPstates_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlPstates_t *, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlRepairStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t)\000nvmlReturn_t (nvmlDevice_t, nvmlRowRemapperHistogramValues_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlSamplingType_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlSample_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperatureThresholds_t, int *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperatureThresholds_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperature_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlUtilization_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuHeterogeneousMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuHeterogeneousMode_t const *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuInstancesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuPgpuMetadata_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuProcessesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerCapabilities_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerGetState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerLog_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerSetState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuTypeId_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlWorkloadPowerProfileRequestedProfiles_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG *)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG *, unsigned long *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG)\000nvmlReturn_t (nvmlDevice_t, 
unsigned PY_LONG_LONG, nvmlEventSet_t)\000nvmlReturn_t (nvmlDevi""ce_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlVgpuInstanceUtilizationSample_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG, unsigned int *, nvmlVgpuProcessUtilizationSample_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlEncoderSessionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlFBCSessionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlProcessInfo_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlVgpuInstance_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *)\000\000\000\000\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlAccountingStats_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlEnableState_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpmSample_t)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t const *, nvmlGpuInstance_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstance_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstance_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned 
int, nvmlGpuThermalSettings_t *)\000nvmlReturn_t (nvmlDevice_t, un""signed int, nvmlIntNvLinkDeviceType_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlNvLinkCapability_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlNvLinkErrorCounter_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlPciInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlReturn_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int)\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned long *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t)\000\000nvmlReturn_t (nvmlEventSet_t *)\000nvmlReturn_t (nvmlEventSet_t)\000nvmlReturn_t (nvmlEventSet_t, nvmlEventData_t *, unsigned int)\000nvmlReturn_t (nvmlGpuInstance_t)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlActiveVgpuInstanceInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlGpuInstanceInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuCreatablePlacementInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t const *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerLogInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerStateInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerState_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuTypeIdInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t *, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t const *, nvmlComputeInstance_t *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *)\000\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, unsigned int 
*)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, unsigned int, nvmlC""omputeInstanceProfileInfo_v2_t *)\000nvmlReturn_t (nvmlPciInfo_t *)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlDetachGpuState_t, nvmlPcieLinkState_t)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlEnableState_t *)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlEnableState_t)\000nvmlReturn_t (nvmlSystemConfComputeSettings_t *)\000nvmlReturn_t (nvmlSystemDriverBranchInfo_t *, unsigned int)\000nvmlReturn_t (nvmlSystemEventSetCreateRequest_t *)\000nvmlReturn_t (nvmlSystemEventSetFreeRequest_t *)\000nvmlReturn_t (nvmlSystemEventSetWaitRequest_t *)\000nvmlReturn_t (nvmlSystemRegisterEventRequest_t *)\000nvmlReturn_t (nvmlUUID_t const *, nvmlDevice_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlLedColor_t)\000nvmlReturn_t (nvmlUnit_t, nvmlLedState_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlPSUInfo_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlUnitFanSpeeds_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlUnitInfo_t *)\000nvmlReturn_t (nvmlUnit_t, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t (nvmlUnit_t, unsigned int, unsigned int *)\000nvmlReturn_t (nvmlVgpuDriverCapability_t, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t)\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int)\000\000\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int, nvmlVgpuVmIdType_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlEnableState_t *)\000\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlFBCStats_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuLicenseInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuMetadata_t *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuPlacementId_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuRuntimeState_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuTypeId_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, 
nvmlEncoderSessionInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, nvmlFBC""SessionInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int, nvmlAccountingStats_t *)\000nvmlReturn_t (nvmlVgpuMetadata_t *, nvmlVgpuPgpuMetadata_t *, nvmlVgpuPgpuCompatibility_t *)\000nvmlReturn_t (nvmlVgpuTypeId_t, char *, unsigned int *)\000\000nvmlReturn_t (nvmlVgpuTypeId_t, char *, unsigned int)\000nvmlReturn_t (nvmlVgpuTypeId_t, nvmlVgpuCapability_t, unsigned int *)\000nvmlReturn_t (nvmlVgpuTypeId_t, nvmlVgpuTypeBar1Info_t *)\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *)\000\000\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned int, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuTypeMaxInstance_t *)\000nvmlReturn_t (nvmlVgpuVersion_t *)\000nvmlReturn_t (nvmlVgpuVersion_t *, nvmlVgpuVersion_t *)\000nvmlReturn_t (unsigned int *)\000\000\000\000\000nvmlReturn_t (unsigned int *, nvmlHwbcEntry_t *)\000nvmlReturn_t (unsigned int)\000\000\000nvmlReturn_t (unsigned int, char *, unsigned int)\000nvmlReturn_t (unsigned int, nvmlDevice_t *)\000nvmlReturn_t (unsigned int, nvmlExcludedDeviceInfo_t *)\000nvmlReturn_t (unsigned int, nvmlUnit_t *)\000nvmlReturn_t (unsigned int, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t 
(void)\000\000nvmlErrorString\000nvmlSystemGetDriverVersion\000nvmlSystemGetNVMLVersion\000nvmlDeviceGetHandleByPciBusId_v2\000nvmlDeviceGetHandleBySerial\000nvmlDeviceGetHandleByUUID\000nvmlSystemGetCudaDriverVersion\000nvmlSystemGetCudaDriverVersion_v2\000nvmlComputeInstanceDestroy\000nvmlComputeInstanceGetInfo_v2\000nvmlSystemGetConfComputeKeyRotationThresholdInfo\000nvmlSystemSetConfComputeKeyRotationThresholdInfo\000nvmlSystemGetConfComputeCapabilities\000nvmlSystemGetConfCompute""State\000nvmlDeviceClearAccountingPids\000nvmlDeviceClearCpuAffinity\000nvmlDeviceResetGpuLockedClocks\000nvmlDeviceResetMemoryLockedClocks\000nvmlDeviceSetCpuAffinity\000nvmlDeviceValidateInforom\000nvmlDeviceGetGspFirmwareVersion\000nvmlDeviceGetPgpuMetadataString\000nvmlDeviceGetBoardPartNumber\000nvmlDeviceGetInforomImageVersion\000nvmlDeviceGetName\000nvmlDeviceGetSerial\000nvmlDeviceGetUUID\000nvmlDeviceGetVbiosVersion\000nvmlDeviceGetGpcClkVfOffset\000nvmlDeviceGetMemClkVfOffset\000nvmlDeviceGetCudaComputeCapability\000nvmlDeviceGetGpcClkMinMaxVfOffset\000nvmlDeviceGetMemClkMinMaxVfOffset\000nvmlDeviceClearFieldValues\000nvmlDeviceGetFieldValues\000nvmlDeviceGetBAR1MemoryInfo\000nvmlDeviceGetBrand\000nvmlDeviceGetBridgeChipInfo\000nvmlDeviceGetBusType\000nvmlDeviceGetC2cModeInfoV\000nvmlDeviceGetClkMonStatus\000nvmlDeviceGetClockOffsets\000nvmlDeviceSetClockOffsets\000nvmlDeviceGetClock\000nvmlDeviceGetMinMaxClockOfPState\000nvmlDeviceGetClockInfo\000nvmlDeviceGetMaxClockInfo\000nvmlDeviceGetMaxCustomerBoostClock\000nvmlDeviceGetComputeMode\000nvmlDeviceSetComputeMode\000nvmlDeviceGetConfComputeGpuAttestationReport\000nvmlDeviceGetConfComputeGpuCertificate\000nvmlDeviceGetConfComputeMemSizeInfo\000nvmlDeviceGetCoolerInfo\000nvmlDeviceGetAddressingMode\000nvmlDeviceGetArchitecture\000nvmlDeviceGetAttributes_v2\000nvmlDeviceGetCapabilities\000nvmlDeviceGetCurrentClockFreqs\000nvmlDeviceGetPerformanceModes\000nvmlDeviceGetPowerMizerMode_v1\000nvmlDeviceSetPowerMizerMode_v1\000nvml
DeviceSetVgpuCapabilities\000nvmlDeviceGetVgpuCapabilities\000nvmlDeviceGetDeviceHandleFromMigDeviceHandle\000nvmlDeviceOnSameBoard\000nvmlDeviceGetP2PStatus\000nvmlDeviceGetTopologyCommonAncestor\000nvmlDeviceGetDramEncryptionMode\000nvmlDeviceSetDramEncryptionMode\000nvmlDeviceGetDriverModel_v2\000nvmlDeviceSetDriverModel\000nvmlDeviceClearEccErrorCounts\000nvmlDeviceGetSramEccErrorStatus\000nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts\000nvmlDeviceGetAccountingMode\000nvmlDeviceG""etDefaultEccMode\000nvmlDeviceGetDisplayActive\000nvmlDeviceGetDisplayMode\000nvmlDeviceGetPersistenceMode\000nvmlDeviceGetRetiredPagesPendingStatus\000nvmlDeviceGetAutoBoostedClocksEnabled\000nvmlDeviceGetEccMode\000nvmlDeviceSetAccountingMode\000nvmlDeviceSetAutoBoostedClocksEnabled\000nvmlDeviceSetEccMode\000nvmlDeviceSetPersistenceMode\000nvmlDeviceSetDefaultAutoBoostedClocksEnabled\000nvmlDeviceGetEncoderCapacity\000nvmlDeviceGetFBCStats\000nvmlDeviceGetFanSpeedRPM\000nvmlGpmSampleGet\000nvmlGpmQueryDeviceSupport\000nvmlDeviceGetDynamicPstatesInfo\000nvmlDeviceGetGpuFabricInfoV\000nvmlDeviceGetGpuOperationMode\000nvmlDeviceSetGpuOperationMode\000nvmlDeviceGetTopologyNearestGpus\000nvmlDeviceGetVirtualizationMode\000nvmlDeviceSetVirtualizationMode\000nvmlDeviceGetGridLicensableFeatures_v4\000nvmlDeviceGetHostVgpuMode\000nvmlDeviceGetHostname_v1\000nvmlDeviceSetHostname_v1\000nvmlDeviceGetInforomVersion\000nvmlDeviceGetMarginTemperature\000nvmlDeviceGetMemoryErrorCounter\000nvmlDeviceGetTotalEccErrors\000nvmlDeviceGetConfComputeProtectedMemoryUsage\000nvmlDeviceGetMemoryInfo_v2\000nvmlDeviceGetNvLinkInfo\000nvmlDeviceSetNvLinkDeviceLowPowerThreshold\000nvmlDeviceGetNvlinkBwMode\000nvmlDeviceSetNvlinkBwMode\000nvmlDeviceGetNvlinkSupportedBwModes\000nvmlDeviceReadWritePRM_v1\000nvmlDeviceGetRetiredPages\000nvmlDeviceGetRetiredPages_v2\000nvmlDeviceGetPciInfoExt\000nvmlDeviceGetPciInfo_v3\000nvmlDeviceGetPcieThroughput\000nvmlDeviceGetPdi\000nvmlDeviceGetPlatformInfo\000nvmlDevicePo
werSmoothingActivatePresetProfile\000nvmlDevicePowerSmoothingUpdatePresetProfileParam\000nvmlDevicePowerSmoothingSetState\000nvmlDeviceGetPowerSource\000nvmlDeviceSetPowerManagementLimit_v2\000nvmlDeviceGetRunningProcessDetailList\000nvmlDeviceGetProcessUtilization\000nvmlDeviceGetProcessesUtilizationInfo\000nvmlDeviceGetPerformanceState\000nvmlDeviceGetPowerState\000nvmlDeviceGetSupportedPerformanceStates\000nvmlDeviceGetRepairStatus\000nvmlDeviceGetAPIRestriction\000nvmlDeviceSetAPIRest""riction\000nvmlDeviceGetRowRemapperHistogram\000nvmlDeviceGetSamples\000nvmlDeviceSetTemperatureThreshold\000nvmlDeviceGetTemperatureThreshold\000nvmlDeviceGetTemperatureV\000nvmlDeviceGetUtilizationRates\000nvmlDeviceGetVgpuHeterogeneousMode\000nvmlDeviceSetVgpuHeterogeneousMode\000nvmlDeviceGetVgpuInstancesUtilizationInfo\000nvmlDeviceGetVgpuMetadata\000nvmlDeviceGetVgpuProcessesUtilizationInfo\000nvmlDeviceGetVgpuSchedulerCapabilities\000nvmlDeviceGetVgpuSchedulerState\000nvmlDeviceGetVgpuSchedulerLog\000nvmlDeviceSetVgpuSchedulerState\000nvmlDeviceGetVgpuTypeCreatablePlacements\000nvmlDeviceGetVgpuTypeSupportedPlacements\000nvmlVgpuTypeGetMaxInstances\000nvmlDeviceWorkloadPowerProfileClearRequestedProfiles\000nvmlDeviceGetCurrentClocksEventReasons\000nvmlDeviceGetSupportedClocksEventReasons\000nvmlDeviceGetSupportedEventTypes\000nvmlDeviceGetTotalEnergyConsumption\000nvmlDeviceGetLastBBXFlushTime\000nvmlDeviceSetConfComputeUnprotectedMemSize\000nvmlDeviceRegisterEvents\000nvmlDeviceGetVgpuUtilization\000nvmlDeviceGetVgpuProcessUtilization\000nvmlDeviceGetAccountingBufferSize\000nvmlDeviceGetAdaptiveClockInfoStatus\000nvmlDeviceGetBoardId\000nvmlDeviceGetComputeInstanceId\000nvmlDeviceGetCurrPcieLinkGeneration\000nvmlDeviceGetCurrPcieLinkWidth\000nvmlDeviceGetEnforcedPowerLimit\000nvmlDeviceGetFanSpeed\000nvmlDeviceGetGpuInstanceId\000nvmlDeviceGetGpuMaxPcieLinkGeneration\000nvmlDeviceGetIndex\000nvmlDeviceGetInforomConfigurationChecksum\000nvmlDeviceGetIrqNum\000nvmlDeviceGetM
axMigDeviceCount\000nvmlDeviceGetMaxPcieLinkGeneration\000nvmlDeviceGetMaxPcieLinkWidth\000nvmlDeviceGetMemoryBusWidth\000nvmlDeviceGetMinorNumber\000nvmlDeviceGetModuleId\000nvmlDeviceGetMultiGpuBoard\000nvmlDeviceGetNumFans\000nvmlDeviceGetNumGpuCores\000nvmlDeviceGetNumaNodeId\000nvmlDeviceGetPcieLinkMaxSpeed\000nvmlDeviceGetPcieReplayCounter\000nvmlDeviceGetPcieSpeed\000nvmlDeviceGetPowerManagementDefaultLimit\000nvmlDeviceGetPowerManagementLimit\000nvmlDeviceGetPowerUsage\000nvmlDevice""IsMigDeviceHandle\000nvmlGpmQueryIfStreamingEnabled\000nvmlDeviceGetEncoderSessions\000nvmlDeviceGetFBCSessions\000nvmlDeviceGetComputeRunningProcesses_v3\000nvmlDeviceGetMPSComputeRunningProcesses_v3\000nvmlDeviceGetActiveVgpus\000nvmlDeviceGetCreatableVgpus\000nvmlDeviceGetSupportedVgpus\000nvmlDeviceGetAccountingPids\000nvmlDeviceGetDecoderUtilization\000nvmlDeviceGetEncoderUtilization\000nvmlDeviceGetGspFirmwareMode\000nvmlDeviceGetJpgUtilization\000nvmlDeviceGetMigMode\000nvmlDeviceGetMinMaxFanSpeed\000nvmlDeviceGetOfaUtilization\000nvmlDeviceGetPowerManagementLimitConstraints\000nvmlDeviceGetSupportedMemoryClocks\000nvmlDeviceGetEncoderStats\000nvmlDeviceGetRemappedRows\000nvmlDeviceResetNvLinkErrorCounters\000nvmlDeviceSetDefaultFanSpeed_v2\000nvmlDeviceSetPowerManagementLimit\000nvmlGpmSetStreamingEnabled\000nvmlDeviceGetAccountingStats\000nvmlDeviceGetMigDeviceHandleByIndex\000nvmlDeviceGetNvLinkState\000nvmlDeviceGetFanControlPolicy_v2\000nvmlDeviceSetFanControlPolicy\000nvmlGpmMigSampleGet\000nvmlDeviceGetGpuInstancePossiblePlacements_v2\000nvmlDeviceCreateGpuInstanceWithPlacement\000nvmlDeviceGetGpuInstanceProfileInfoByIdV\000nvmlDeviceGetGpuInstanceProfileInfoV\000nvmlDeviceCreateGpuInstance\000nvmlDeviceGetGpuInstanceById\000nvmlDeviceGetGpuInstances\000nvmlDeviceGetThermalSettings\000nvmlDeviceGetNvLinkRemoteDeviceType\000nvmlDeviceGetNvLinkCapability\000nvmlDeviceGetNvLinkErrorCounter\000nvmlDeviceGetNvLinkRemotePciInfo_v2\000nvmlDeviceSetMigMode\000nvmlDeviceGet
FanSpeed_v2\000nvmlDeviceGetGpuInstanceRemainingCapacity\000nvmlDeviceGetNvLinkVersion\000nvmlDeviceGetTargetFanSpeed\000nvmlDeviceGetSupportedGraphicsClocks\000nvmlDeviceSetFanSpeed_v2\000nvmlDeviceSetGpuLockedClocks\000nvmlDeviceSetMemoryLockedClocks\000nvmlDeviceGetCpuAffinity\000nvmlDeviceGetCpuAffinityWithinScope\000nvmlDeviceGetMemoryAffinity\000nvmlEventSetCreate\000nvmlEventSetFree\000nvmlEventSetWait_v2\000nvmlGpuInstanceDestroy\000nvmlGpuInstanceGetActiveVgpus\000nvmlGpuInstanceGetI""nfo\000nvmlGpuInstanceGetVgpuTypeCreatablePlacements\000nvmlGpuInstanceGetVgpuHeterogeneousMode\000nvmlGpuInstanceSetVgpuHeterogeneousMode\000nvmlGpuInstanceGetVgpuSchedulerLog\000nvmlGpuInstanceGetVgpuSchedulerState\000nvmlGpuInstanceSetVgpuSchedulerState\000nvmlGpuInstanceGetCreatableVgpus\000nvmlGpuInstanceGetComputeInstancePossiblePlacements\000nvmlGpuInstanceCreateComputeInstanceWithPlacement\000nvmlGpuInstanceCreateComputeInstance\000nvmlGpuInstanceGetComputeInstanceById\000nvmlGpuInstanceGetComputeInstances\000nvmlGpuInstanceGetComputeInstanceRemainingCapacity\000nvmlGpuInstanceGetComputeInstanceProfileInfoV\000nvmlDeviceDiscoverGpus\000nvmlDeviceRemoveGpu_v2\000nvmlDeviceQueryDrainState\000nvmlDeviceModifyDrainState\000nvmlSystemGetConfComputeSettings\000nvmlSystemGetDriverBranch\000nvmlSystemEventSetCreate\000nvmlSystemEventSetFree\000nvmlSystemEventSetWait\000nvmlSystemRegisterEvents\000nvmlDeviceGetHandleByUUIDV\000nvmlUnitSetLedState\000nvmlUnitGetLedState\000nvmlUnitGetPsuInfo\000nvmlUnitGetFanSpeedInfo\000nvmlUnitGetUnitInfo\000nvmlUnitGetDevices\000nvmlUnitGetTemperature\000nvmlGetVgpuDriverCapabilities\000nvmlVgpuInstanceClearAccountingPids\000nvmlVgpuInstanceGetGpuPciId\000nvmlVgpuInstanceGetMdevUUID\000nvmlVgpuInstanceGetUUID\000nvmlVgpuInstanceGetVmDriverVersion\000nvmlVgpuInstanceGetVmID\000nvmlVgpuInstanceGetAccountingMode\000nvmlVgpuInstanceGetEccMode\000nvmlVgpuInstanceGetFBCStats\000nvmlVgpuInstanceGetLicenseInfo_v2\000nvmlVgpuInstanceGetMetadata\000nvm
lVgpuInstanceGetPlacementId\000nvmlVgpuInstanceGetRuntimeStateSize\000nvmlVgpuInstanceGetType\000nvmlVgpuInstanceGetFbUsage\000nvmlVgpuInstanceGetEncoderCapacity\000nvmlVgpuInstanceGetFrameRateLimit\000nvmlVgpuInstanceGetGpuInstanceId\000nvmlVgpuInstanceGetLicenseStatus\000nvmlVgpuInstanceGetEncoderSessions\000nvmlVgpuInstanceGetFBCSessions\000nvmlVgpuInstanceGetAccountingPids\000nvmlVgpuInstanceGetEncoderStats\000nvmlVgpuInstanceSetEncoderCapacity\000nvmlVgpuInstanceGetAccountingStats\000nvmlG""etVgpuCompatibility\000nvmlVgpuTypeGetClass\000nvmlVgpuTypeGetName\000nvmlVgpuTypeGetLicense\000nvmlVgpuTypeGetCapabilities\000nvmlVgpuTypeGetBAR1Info\000nvmlVgpuTypeGetFbReservation\000nvmlVgpuTypeGetFramebufferSize\000nvmlVgpuTypeGetGspHeapSize\000nvmlVgpuTypeGetDeviceID\000nvmlVgpuTypeGetFrameRateLimit\000nvmlVgpuTypeGetGpuInstanceProfileId\000nvmlVgpuTypeGetMaxInstancesPerVm\000nvmlVgpuTypeGetNumDisplayHeads\000nvmlVgpuTypeGetResolution\000nvmlVgpuTypeGetMaxInstancesPerGpuInstance\000nvmlSetVgpuVersion\000nvmlGetVgpuVersion\000nvmlDeviceGetCount_v2\000nvmlGetExcludedDeviceCount\000nvmlSystemGetConfComputeGpusReadyState\000nvmlSystemGetNvlinkBwMode\000nvmlUnitGetCount\000nvmlSystemGetHicVersion\000nvmlInitWithFlags\000nvmlSystemSetConfComputeGpusReadyState\000nvmlSystemSetNvlinkBwMode\000nvmlSystemGetProcessName\000nvmlDeviceGetHandleByIndex_v2\000nvmlGetExcludedDeviceInfoByIndex\000nvmlUnitGetHandleByIndex\000nvmlSystemGetTopologyGpuSet\000nvmlInit_v2\000nvmlShutdownchar const *(nvmlReturn_t)\000nvmlReturn_t (char *, unsigned int)\000\000nvmlReturn_t (char const *, nvmlDevice_t *)\000\000\000nvmlReturn_t (int *)\000\000nvmlReturn_t (nvmlComputeInstance_t)\000nvmlReturn_t (nvmlComputeInstance_t, nvmlComputeInstanceInfo_t *)\000nvmlReturn_t (nvmlConfComputeGetKeyRotationThresholdInfo_t *)\000nvmlReturn_t (nvmlConfComputeSetKeyRotationThresholdInfo_t *)\000nvmlReturn_t (nvmlConfComputeSystemCaps_t *)\000nvmlReturn_t (nvmlConfComputeSystemState_t *)\000nvmlReturn_t 
(nvmlDevice_t)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, char *)\000nvmlReturn_t (nvmlDevice_t, char *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, char *, unsigned int)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, int *)\000\000nvmlReturn_t (nvmlDevice_t, int *, int *)\000\000\000nvmlReturn_t (nvmlDevice_t, int, nvmlFieldValue_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlBAR1Memory_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBrandType_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBridgeChipHierarc""hy_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlBusType_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlC2cModeInfo_v1_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlClkMonStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockOffset_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, nvmlClockId_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, nvmlPstates_t, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlClockType_t, unsigned int *)\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlComputeMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlComputeMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeGpuAttestationReport_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeGpuCertificate_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlConfComputeMemSizeInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlCoolerInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceAddressingMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceArchitecture_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceAttributes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceCapabilities_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceCurrentClockFreqs_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevicePerfModes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevicePowerMizerModes_v1_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceVgpuCapability_t, nvmlEnableState_t)\000nvmlReturn_t (nvmlDevice_t, nvmlDeviceVgpuCapability_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, 
nvmlDevice_t, int *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, nvmlGpuP2PCapsIndex_t, nvmlGpuP2PStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDevice_t, nvmlGpuTopologyLevel_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDramEncryptionInfo_t *, nvmlDramEncryptionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDramEncryptionInfo_t const *)\000nvmlReturn_t (nvmlDevice_t, nvmlDriverModel_t *, nvmlDriverModel_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlDriverModel_t, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlEccCounterType_t)""\000nvmlReturn_t (nvmlDevice_t, nvmlEccSramErrorStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlEccSramUniqueUncorrectedErrorCounts_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t *)\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t *, nvmlEnableState_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t)\000\000\000\000nvmlReturn_t (nvmlDevice_t, nvmlEnableState_t, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlEncoderType_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlFBCStats_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlFanSpeedInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpmSample_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGpmSupport_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuDynamicPstatesInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuFabricInfoV_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuOperationMode_t *, nvmlGpuOperationMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuOperationMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuTopologyLevel_t, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuVirtualizationMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlGpuVirtualizationMode_t)\000nvmlReturn_t (nvmlDevice_t, nvmlGridLicensableFeatures_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlHostVgpuMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlHostname_v1_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlInforomObject_t, char *, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlMarginTemperature_t *)\000nvmlReturn_t 
(nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, nvmlMemoryLocation_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemoryErrorType_t, nvmlEccCounterType_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemory_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlMemory_v2_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvLinkInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvLinkPowerThres_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkGetBwMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlNvlinkSetBwMode_t *)\000nvmlReturn""_t (nvmlDevice_t, nvmlNvlinkSupportedBwModes_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPRMTLV_v1_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlPageRetirementCause_t, unsigned int *, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, nvmlPciInfoExt_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPciInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPcieUtilCounter_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlPdi_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPlatformInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSmoothingProfile_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSmoothingState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerSource_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPowerValue_v2_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlProcessDetailList_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlProcessUtilizationSample_t *, unsigned int *, unsigned PY_LONG_LONG)\000nvmlReturn_t (nvmlDevice_t, nvmlProcessesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlPstates_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlPstates_t *, unsigned int)\000nvmlReturn_t (nvmlDevice_t, nvmlRepairStatus_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlRestrictedAPI_t, nvmlEnableState_t)\000nvmlReturn_t (nvmlDevice_t, nvmlRowRemapperHistogramValues_t *)\000nvmlReturn_t (nvmlDevice_t, 
nvmlSamplingType_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlSample_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperatureThresholds_t, int *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperatureThresholds_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlTemperature_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlUtilization_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuHeterogeneousMode_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuHeterogeneousMode_t const *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuInstancesUtilizationInfo_t *)""\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuPgpuMetadata_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuProcessesUtilizationInfo_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerCapabilities_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerGetState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerLog_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuSchedulerSetState_t *)\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuTypeId_t, nvmlVgpuPlacementList_t *)\000\000nvmlReturn_t (nvmlDevice_t, nvmlVgpuTypeId_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, nvmlWorkloadPowerProfileRequestedProfiles_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG *)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG *, unsigned long *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG, nvmlEventSet_t)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG, nvmlValueType_t *, unsigned int *, nvmlVgpuInstanceUtilizationSample_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned PY_LONG_LONG, unsigned int *, nvmlVgpuProcessUtilizationSample_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *)\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlEncoderSessionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlFBCSessionInfo_t *)\000nvmlReturn_t (nvmlDevice_t, 
unsigned int *, nvmlProcessInfo_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlVgpuInstance_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, nvmlVgpuTypeId_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *)\000\000\000\000\000\000\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int *, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int)\000\000\000\000nvmlReturn_t (nvmlDevice_t, ""unsigned int, nvmlAccountingStats_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlDevice_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlEnableState_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlFanControlPolicy_t)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpmSample_t)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstancePlacement_t const *, nvmlGpuInstance_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstanceProfileInfo_v2_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstance_t *)\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuInstance_t *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlGpuThermalSettings_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlIntNvLinkDeviceType_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlNvLinkCapability_t, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlNvLinkErrorCounter_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlPciInfo_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, nvmlReturn_t *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned 
int)\000\000\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned long *)\000nvmlReturn_t (nvmlDevice_t, unsigned int, unsigned long *, nvmlAffinityScope_t)\000\000nvmlReturn_t (nvmlEventSet_t *)\000nvmlReturn_t (nvmlEventSet_t)\000nvmlReturn_t (nvmlEventSet_t, nvmlEventData_t *, unsigned int)\000nvmlReturn_t (nvmlGpuInstance_t)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlActiveVgpuInstanceInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlGpuInstanceInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuCreatablePlacementInfo_t *)\000nvmlReturn_t"" (nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuHeterogeneousMode_t const *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerLogInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerStateInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuSchedulerState_t *)\000nvmlReturn_t (nvmlGpuInstance_t, nvmlVgpuTypeIdInfo_t *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t *, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstancePlacement_t const *, nvmlComputeInstance_t *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *)\000\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, nvmlComputeInstance_t *, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, unsigned int *)\000nvmlReturn_t (nvmlGpuInstance_t, unsigned int, unsigned int, nvmlComputeInstanceProfileInfo_v2_t *)\000nvmlReturn_t (nvmlPciInfo_t *)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlDetachGpuState_t, nvmlPcieLinkState_t)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlEnableState_t *)\000nvmlReturn_t (nvmlPciInfo_t *, nvmlEnableState_t)\000nvmlReturn_t (nvmlSystemConfComputeSettings_t *)\000nvmlReturn_t (nvmlSystemDriverBranchInfo_t *, unsigned int)\000nvmlReturn_t (nvmlSystemEventSetCreateRequest_t *)\000nvmlReturn_t (nvmlSystemEventSetFreeRequest_t *)\000nvmlReturn_t (nvmlSystemEventSetWaitRequest_t *)\000nvmlReturn_t 
(nvmlSystemRegisterEventRequest_t *)\000nvmlReturn_t (nvmlUUID_t const *, nvmlDevice_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlLedColor_t)\000nvmlReturn_t (nvmlUnit_t, nvmlLedState_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlPSUInfo_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlUnitFanSpeeds_t *)\000nvmlReturn_t (nvmlUnit_t, nvmlUnitInfo_t *)\000nvmlReturn_t (nvmlUnit_t, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t (nvmlUnit_t, unsigned int, unsigned int *)\000nvmlReturn_t (nvmlVgpuDriverCapability_t, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t)\000nvmlRet""urn_t (nvmlVgpuInstance_t, char *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int)\000\000\000nvmlReturn_t (nvmlVgpuInstance_t, char *, unsigned int, nvmlVgpuVmIdType_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlEnableState_t *)\000\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlFBCStats_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuLicenseInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuMetadata_t *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuPlacementId_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuRuntimeState_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, nvmlVgpuTypeId_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, nvmlEncoderSessionInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, nvmlFBCSessionInfo_t *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int *, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int)\000nvmlReturn_t (nvmlVgpuInstance_t, unsigned int, nvmlAccountingStats_t *)\000nvmlReturn_t (nvmlVgpuMetadata_t *, nvmlVgpuPgpuMetadata_t *, nvmlVgpuPgpuCompatibility_t *)\000nvmlReturn_t (nvmlVgpuTypeId_t, char *, unsigned int *)\000\000nvmlReturn_t (nvmlVgpuTypeId_t, char *, unsigned int)\000nvmlReturn_t 
(nvmlVgpuTypeId_t, nvmlVgpuCapability_t, unsigned int *)\000nvmlReturn_t (nvmlVgpuTypeId_t, nvmlVgpuTypeBar1Info_t *)\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *)\000\000\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned PY_LONG_LONG *, unsigned PY_LONG_LONG *)\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned int *)\000\000\000\000nvmlReturn_t (nvmlVgpuTypeId_t, unsigned int, unsigned int *, unsigned int *)\000nvmlReturn_t (nvmlVgpuTypeMaxInstance_t *)\000nvmlReturn_t (nvmlVgpuVersion_t *)\000nvmlReturn_t (nvmlVgpuVersion_t *, nvmlVgpuVersion_t *)\000nvmlRe""turn_t (unsigned int *)\000\000\000\000\000nvmlReturn_t (unsigned int *, nvmlHwbcEntry_t *)\000nvmlReturn_t (unsigned int)\000\000\000nvmlReturn_t (unsigned int, char *, unsigned int)\000nvmlReturn_t (unsigned int, nvmlDevice_t *)\000nvmlReturn_t (unsigned int, nvmlExcludedDeviceInfo_t *)\000nvmlReturn_t (unsigned int, nvmlUnit_t *)\000nvmlReturn_t (unsigned int, unsigned int *, nvmlDevice_t *)\000nvmlReturn_t (void)\000\000_nvmlErrorString\000_nvmlSystemGetDriverVersion\000_nvmlSystemGetNVMLVersion\000_nvmlDeviceGetHandleByPciBusId_v2\000_nvmlDeviceGetHandleBySerial\000_nvmlDeviceGetHandleByUUID\000_nvmlSystemGetCudaDriverVersion\000_nvmlSystemGetCudaDriverVersion_v2\000_nvmlComputeInstanceDestroy\000_nvmlComputeInstanceGetInfo_v2\000_nvmlSystemGetConfComputeKeyRotationThresholdInfo\000_nvmlSystemSetConfComputeKeyRotationThresholdInfo\000_nvmlSystemGetConfComputeCapabilities\000_nvmlSystemGetConfComputeState\000_nvmlDeviceClearAccountingPids\000_nvmlDeviceClearCpuAffinity\000_nvmlDeviceResetGpuLockedClocks\000_nvmlDeviceResetMemoryLockedClocks\000_nvmlDeviceSetCpuAffinity\000_nvmlDeviceValidateInforom\000_nvmlDeviceGetGspFirmwareVersion\000_nvmlDeviceGetPgpuMetadataString\000_nvmlDeviceGetBoardPartNumber\000_nvmlDeviceGetInforomImageVersion\000_nvmlDeviceGetName\000_nvmlDeviceGetSerial\000_nvmlDeviceGetUUID\000_nvmlDeviceGetVbiosVersion\000_nvmlDeviceGetGpcClkVfOffset\000_nvmlDeviceGetMemClkVfOffset\000_nvm
lDeviceGetCudaComputeCapability\000_nvmlDeviceGetGpcClkMinMaxVfOffset\000_nvmlDeviceGetMemClkMinMaxVfOffset\000_nvmlDeviceClearFieldValues\000_nvmlDeviceGetFieldValues\000_nvmlDeviceGetBAR1MemoryInfo\000_nvmlDeviceGetBrand\000_nvmlDeviceGetBridgeChipInfo\000_nvmlDeviceGetBusType\000_nvmlDeviceGetC2cModeInfoV\000_nvmlDeviceGetClkMonStatus\000_nvmlDeviceGetClockOffsets\000_nvmlDeviceSetClockOffsets\000_nvmlDeviceGetClock\000_nvmlDeviceGetMinMaxClockOfPState\000_nvmlDeviceGetClockInfo\000_nvmlDeviceGetMaxClockInfo\000_nvmlDeviceGetMaxCustomerBoostClock\000_nvmlDevic""eGetComputeMode\000_nvmlDeviceSetComputeMode\000_nvmlDeviceGetConfComputeGpuAttestationReport\000_nvmlDeviceGetConfComputeGpuCertificate\000_nvmlDeviceGetConfComputeMemSizeInfo\000_nvmlDeviceGetCoolerInfo\000_nvmlDeviceGetAddressingMode\000_nvmlDeviceGetArchitecture\000_nvmlDeviceGetAttributes_v2\000_nvmlDeviceGetCapabilities\000_nvmlDeviceGetCurrentClockFreqs\000_nvmlDeviceGetPerformanceModes\000_nvmlDeviceGetPowerMizerMode_v1\000_nvmlDeviceSetPowerMizerMode_v1\000_nvmlDeviceSetVgpuCapabilities\000_nvmlDeviceGetVgpuCapabilities\000_nvmlDeviceGetDeviceHandleFromMigDeviceHandle\000_nvmlDeviceOnSameBoard\000_nvmlDeviceGetP2PStatus\000_nvmlDeviceGetTopologyCommonAncestor\000_nvmlDeviceGetDramEncryptionMode\000_nvmlDeviceSetDramEncryptionMode\000_nvmlDeviceGetDriverModel_v2\000_nvmlDeviceSetDriverModel\000_nvmlDeviceClearEccErrorCounts\000_nvmlDeviceGetSramEccErrorStatus\000_nvmlDeviceGetSramUniqueUncorrectedEccErrorCounts\000_nvmlDeviceGetAccountingMode\000_nvmlDeviceGetDefaultEccMode\000_nvmlDeviceGetDisplayActive\000_nvmlDeviceGetDisplayMode\000_nvmlDeviceGetPersistenceMode\000_nvmlDeviceGetRetiredPagesPendingStatus\000_nvmlDeviceGetAutoBoostedClocksEnabled\000_nvmlDeviceGetEccMode\000_nvmlDeviceSetAccountingMode\000_nvmlDeviceSetAutoBoostedClocksEnabled\000_nvmlDeviceSetEccMode\000_nvmlDeviceSetPersistenceMode\000_nvmlDeviceSetDefaultAutoBoostedClocksEnabled\000_nvmlDeviceGetEncoderCapacity\000_nvmlDeviceG
etFBCStats\000_nvmlDeviceGetFanSpeedRPM\000_nvmlGpmSampleGet\000_nvmlGpmQueryDeviceSupport\000_nvmlDeviceGetDynamicPstatesInfo\000_nvmlDeviceGetGpuFabricInfoV\000_nvmlDeviceGetGpuOperationMode\000_nvmlDeviceSetGpuOperationMode\000_nvmlDeviceGetTopologyNearestGpus\000_nvmlDeviceGetVirtualizationMode\000_nvmlDeviceSetVirtualizationMode\000_nvmlDeviceGetGridLicensableFeatures_v4\000_nvmlDeviceGetHostVgpuMode\000_nvmlDeviceGetHostname_v1\000_nvmlDeviceSetHostname_v1\000_nvmlDeviceGetInforomVersion\000_nvmlDeviceGetMarginTemperature\000_nvmlDeviceGetMemoryErrorCounter""\000_nvmlDeviceGetTotalEccErrors\000_nvmlDeviceGetConfComputeProtectedMemoryUsage\000_nvmlDeviceGetMemoryInfo_v2\000_nvmlDeviceGetNvLinkInfo\000_nvmlDeviceSetNvLinkDeviceLowPowerThreshold\000_nvmlDeviceGetNvlinkBwMode\000_nvmlDeviceSetNvlinkBwMode\000_nvmlDeviceGetNvlinkSupportedBwModes\000_nvmlDeviceReadWritePRM_v1\000_nvmlDeviceGetRetiredPages\000_nvmlDeviceGetRetiredPages_v2\000_nvmlDeviceGetPciInfoExt\000_nvmlDeviceGetPciInfo_v3\000_nvmlDeviceGetPcieThroughput\000_nvmlDeviceGetPdi\000_nvmlDeviceGetPlatformInfo\000_nvmlDevicePowerSmoothingActivatePresetProfile\000_nvmlDevicePowerSmoothingUpdatePresetProfileParam\000_nvmlDevicePowerSmoothingSetState\000_nvmlDeviceGetPowerSource\000_nvmlDeviceSetPowerManagementLimit_v2\000_nvmlDeviceGetRunningProcessDetailList\000_nvmlDeviceGetProcessUtilization\000_nvmlDeviceGetProcessesUtilizationInfo\000_nvmlDeviceGetPerformanceState\000_nvmlDeviceGetPowerState\000_nvmlDeviceGetSupportedPerformanceStates\000_nvmlDeviceGetRepairStatus\000_nvmlDeviceGetAPIRestriction\000_nvmlDeviceSetAPIRestriction\000_nvmlDeviceGetRowRemapperHistogram\000_nvmlDeviceGetSamples\000_nvmlDeviceSetTemperatureThreshold\000_nvmlDeviceGetTemperatureThreshold\000_nvmlDeviceGetTemperatureV\000_nvmlDeviceGetUtilizationRates\000_nvmlDeviceGetVgpuHeterogeneousMode\000_nvmlDeviceSetVgpuHeterogeneousMode\000_nvmlDeviceGetVgpuInstancesUtilizationInfo\000_nvmlDeviceGetVgpuMetadata\000_nvmlDeviceGetVgpuP
rocessesUtilizationInfo\000_nvmlDeviceGetVgpuSchedulerCapabilities\000_nvmlDeviceGetVgpuSchedulerState\000_nvmlDeviceGetVgpuSchedulerLog\000_nvmlDeviceSetVgpuSchedulerState\000_nvmlDeviceGetVgpuTypeCreatablePlacements\000_nvmlDeviceGetVgpuTypeSupportedPlacements\000_nvmlVgpuTypeGetMaxInstances\000_nvmlDeviceWorkloadPowerProfileClearRequestedProfiles\000_nvmlDeviceGetCurrentClocksEventReasons\000_nvmlDeviceGetSupportedClocksEventReasons\000_nvmlDeviceGetSupportedEventTypes\000_nvmlDeviceGetTotalEnergyConsumption\000_nvmlDeviceGetLastBBXFlushTime\000_nvmlDeviceSetCon""fComputeUnprotectedMemSize\000_nvmlDeviceRegisterEvents\000_nvmlDeviceGetVgpuUtilization\000_nvmlDeviceGetVgpuProcessUtilization\000_nvmlDeviceGetAccountingBufferSize\000_nvmlDeviceGetAdaptiveClockInfoStatus\000_nvmlDeviceGetBoardId\000_nvmlDeviceGetComputeInstanceId\000_nvmlDeviceGetCurrPcieLinkGeneration\000_nvmlDeviceGetCurrPcieLinkWidth\000_nvmlDeviceGetEnforcedPowerLimit\000_nvmlDeviceGetFanSpeed\000_nvmlDeviceGetGpuInstanceId\000_nvmlDeviceGetGpuMaxPcieLinkGeneration\000_nvmlDeviceGetIndex\000_nvmlDeviceGetInforomConfigurationChecksum\000_nvmlDeviceGetIrqNum\000_nvmlDeviceGetMaxMigDeviceCount\000_nvmlDeviceGetMaxPcieLinkGeneration\000_nvmlDeviceGetMaxPcieLinkWidth\000_nvmlDeviceGetMemoryBusWidth\000_nvmlDeviceGetMinorNumber\000_nvmlDeviceGetModuleId\000_nvmlDeviceGetMultiGpuBoard\000_nvmlDeviceGetNumFans\000_nvmlDeviceGetNumGpuCores\000_nvmlDeviceGetNumaNodeId\000_nvmlDeviceGetPcieLinkMaxSpeed\000_nvmlDeviceGetPcieReplayCounter\000_nvmlDeviceGetPcieSpeed\000_nvmlDeviceGetPowerManagementDefaultLimit\000_nvmlDeviceGetPowerManagementLimit\000_nvmlDeviceGetPowerUsage\000_nvmlDeviceIsMigDeviceHandle\000_nvmlGpmQueryIfStreamingEnabled\000_nvmlDeviceGetEncoderSessions\000_nvmlDeviceGetFBCSessions\000_nvmlDeviceGetComputeRunningProcesses_v3\000_nvmlDeviceGetMPSComputeRunningProcesses_v3\000_nvmlDeviceGetActiveVgpus\000_nvmlDeviceGetCreatableVgpus\000_nvmlDeviceGetSupportedVgpus\000_nvmlDeviceGetAccountingP
ids\000_nvmlDeviceGetDecoderUtilization\000_nvmlDeviceGetEncoderUtilization\000_nvmlDeviceGetGspFirmwareMode\000_nvmlDeviceGetJpgUtilization\000_nvmlDeviceGetMigMode\000_nvmlDeviceGetMinMaxFanSpeed\000_nvmlDeviceGetOfaUtilization\000_nvmlDeviceGetPowerManagementLimitConstraints\000_nvmlDeviceGetSupportedMemoryClocks\000_nvmlDeviceGetEncoderStats\000_nvmlDeviceGetRemappedRows\000_nvmlDeviceResetNvLinkErrorCounters\000_nvmlDeviceSetDefaultFanSpeed_v2\000_nvmlDeviceSetPowerManagementLimit\000_nvmlGpmSetStreamingEnabled\000_nvmlDeviceGetAccountingStats\000_nvmlDeviceGetM""igDeviceHandleByIndex\000_nvmlDeviceGetNvLinkState\000_nvmlDeviceGetFanControlPolicy_v2\000_nvmlDeviceSetFanControlPolicy\000_nvmlGpmMigSampleGet\000_nvmlDeviceGetGpuInstancePossiblePlacements_v2\000_nvmlDeviceCreateGpuInstanceWithPlacement\000_nvmlDeviceGetGpuInstanceProfileInfoByIdV\000_nvmlDeviceGetGpuInstanceProfileInfoV\000_nvmlDeviceCreateGpuInstance\000_nvmlDeviceGetGpuInstanceById\000_nvmlDeviceGetGpuInstances\000_nvmlDeviceGetThermalSettings\000_nvmlDeviceGetNvLinkRemoteDeviceType\000_nvmlDeviceGetNvLinkCapability\000_nvmlDeviceGetNvLinkErrorCounter\000_nvmlDeviceGetNvLinkRemotePciInfo_v2\000_nvmlDeviceSetMigMode\000_nvmlDeviceGetFanSpeed_v2\000_nvmlDeviceGetGpuInstanceRemainingCapacity\000_nvmlDeviceGetNvLinkVersion\000_nvmlDeviceGetTargetFanSpeed\000_nvmlDeviceGetSupportedGraphicsClocks\000_nvmlDeviceSetFanSpeed_v2\000_nvmlDeviceSetGpuLockedClocks\000_nvmlDeviceSetMemoryLockedClocks\000_nvmlDeviceGetCpuAffinity\000_nvmlDeviceGetCpuAffinityWithinScope\000_nvmlDeviceGetMemoryAffinity\000_nvmlEventSetCreate\000_nvmlEventSetFree\000_nvmlEventSetWait_v2\000_nvmlGpuInstanceDestroy\000_nvmlGpuInstanceGetActiveVgpus\000_nvmlGpuInstanceGetInfo\000_nvmlGpuInstanceGetVgpuTypeCreatablePlacements\000_nvmlGpuInstanceGetVgpuHeterogeneousMode\000_nvmlGpuInstanceSetVgpuHeterogeneousMode\000_nvmlGpuInstanceGetVgpuSchedulerLog\000_nvmlGpuInstanceGetVgpuSchedulerState\000_nvmlGpuInstanceSetVgpuSchedulerState\00
0_nvmlGpuInstanceGetCreatableVgpus\000_nvmlGpuInstanceGetComputeInstancePossiblePlacements\000_nvmlGpuInstanceCreateComputeInstanceWithPlacement\000_nvmlGpuInstanceCreateComputeInstance\000_nvmlGpuInstanceGetComputeInstanceById\000_nvmlGpuInstanceGetComputeInstances\000_nvmlGpuInstanceGetComputeInstanceRemainingCapacity\000_nvmlGpuInstanceGetComputeInstanceProfileInfoV\000_nvmlDeviceDiscoverGpus\000_nvmlDeviceRemoveGpu_v2\000_nvmlDeviceQueryDrainState\000_nvmlDeviceModifyDrainState\000_nvmlSystemGetConfComputeSettings\000_nvmlSystemGetDriverBranch\000_nvmlSystemEventSe""tCreate\000_nvmlSystemEventSetFree\000_nvmlSystemEventSetWait\000_nvmlSystemRegisterEvents\000_nvmlDeviceGetHandleByUUIDV\000_nvmlUnitSetLedState\000_nvmlUnitGetLedState\000_nvmlUnitGetPsuInfo\000_nvmlUnitGetFanSpeedInfo\000_nvmlUnitGetUnitInfo\000_nvmlUnitGetDevices\000_nvmlUnitGetTemperature\000_nvmlGetVgpuDriverCapabilities\000_nvmlVgpuInstanceClearAccountingPids\000_nvmlVgpuInstanceGetGpuPciId\000_nvmlVgpuInstanceGetMdevUUID\000_nvmlVgpuInstanceGetUUID\000_nvmlVgpuInstanceGetVmDriverVersion\000_nvmlVgpuInstanceGetVmID\000_nvmlVgpuInstanceGetAccountingMode\000_nvmlVgpuInstanceGetEccMode\000_nvmlVgpuInstanceGetFBCStats\000_nvmlVgpuInstanceGetLicenseInfo_v2\000_nvmlVgpuInstanceGetMetadata\000_nvmlVgpuInstanceGetPlacementId\000_nvmlVgpuInstanceGetRuntimeStateSize\000_nvmlVgpuInstanceGetType\000_nvmlVgpuInstanceGetFbUsage\000_nvmlVgpuInstanceGetEncoderCapacity\000_nvmlVgpuInstanceGetFrameRateLimit\000_nvmlVgpuInstanceGetGpuInstanceId\000_nvmlVgpuInstanceGetLicenseStatus\000_nvmlVgpuInstanceGetEncoderSessions\000_nvmlVgpuInstanceGetFBCSessions\000_nvmlVgpuInstanceGetAccountingPids\000_nvmlVgpuInstanceGetEncoderStats\000_nvmlVgpuInstanceSetEncoderCapacity\000_nvmlVgpuInstanceGetAccountingStats\000_nvmlGetVgpuCompatibility\000_nvmlVgpuTypeGetClass\000_nvmlVgpuTypeGetName\000_nvmlVgpuTypeGetLicense\000_nvmlVgpuTypeGetCapabilities\000_nvmlVgpuTypeGetBAR1Info\000_nvmlVgpuTypeGetFbReservation\000_nvmlVgpuTyp
eGetFramebufferSize\000_nvmlVgpuTypeGetGspHeapSize\000_nvmlVgpuTypeGetDeviceID\000_nvmlVgpuTypeGetFrameRateLimit\000_nvmlVgpuTypeGetGpuInstanceProfileId\000_nvmlVgpuTypeGetMaxInstancesPerVm\000_nvmlVgpuTypeGetNumDisplayHeads\000_nvmlVgpuTypeGetResolution\000_nvmlVgpuTypeGetMaxInstancesPerGpuInstance\000_nvmlSetVgpuVersion\000_nvmlGetVgpuVersion\000_nvmlDeviceGetCount_v2\000_nvmlGetExcludedDeviceCount\000_nvmlSystemGetConfComputeGpusReadyState\000_nvmlSystemGetNvlinkBwMode\000_nvmlUnitGetCount\000_nvmlSystemGetHicVersion\000_nvmlInitWithFlags\000_nvmlSystemSetConfComputeG""pusReadyState\000_nvmlSystemSetNvlinkBwMode\000_nvmlSystemGetProcessName\000_nvmlDeviceGetHandleByIndex_v2\000_nvmlGetExcludedDeviceInfoByIndex\000_nvmlUnitGetHandleByIndex\000_nvmlSystemGetTopologyGpuSet\000_nvmlInit_v2\000_nvmlShutdown";
    PyObject *data = NULL;
    CYTHON_UNUSED_VAR(__Pyx_DecompressString);
    #endif
    PyObject **stringtab = __pyx_mstate->__pyx_string_tab;
    Py_ssize_t pos = 0;
    for (int i = 0; i < 9; i++) {
      Py_ssize_t bytes_length = index[i].length;
      PyObject *string = PyUnicode_DecodeUTF8(bytes + pos, bytes_length, NULL);
      if (likely(string) && i >= 1) PyUnicode_InternInPlace(&string);
      if (unlikely(!string)) {
        Py_XDECREF(data);
        __PYX_ERR(0, 1, __pyx_L1_error)
      }
      stringtab[i] = string;
      pos += bytes_length;
    }
    for (int i = 9; i < 11; i++) {
      Py_ssize_t bytes_length = index[i].length;
      PyObject *string = PyBytes_FromStringAndSize(bytes + pos, bytes_length);
      stringtab[i] = string;
      pos += bytes_length;
      if (unlikely(!string)) {
        Py_XDECREF(data);
        __PYX_ERR(0, 1, __pyx_L1_error)
      }
    }
    Py_XDECREF(data);
    for (Py_ssize_t i = 0; i < 11; i++) {
      if (unlikely(PyObject_Hash(stringtab[i]) == -1)) {
        __PYX_ERR(0, 1, __pyx_L1_error)
      }
    }
    #if CYTHON_IMMORTAL_CONSTANTS
    {
      PyObject **table = stringtab + 9;
      for (Py_ssize_t i=0; i<2; ++i) {
        #if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
        Py_SET_REFCNT(table[i], _Py_IMMORTAL_REFCNT_LOCAL);
        #else
        Py_SET_REFCNT(table[i], _Py_IMMORTAL_INITIAL_REFCNT);
        #endif
      }
    }
    #endif
  }
  return 0;
  __pyx_L1_error:;
  return -1;
}
/* #### Code section: init_codeobjects ### */

static int __Pyx_CreateCodeObjects(__pyx_mstatetype *__pyx_mstate) {
  /* Nothing to create for this module; succeed unconditionally. */
  (void)__pyx_mstate;
  return 0;
}
/* #### Code section: init_globals ### */

static int __Pyx_InitGlobals(void) {
  /* PythonCompatibility.init */
  if (likely(__Pyx_init_co_variables() == 0)); else
  
  if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)

  return 0;
  __pyx_L1_error:;
  return -1;
}
/* #### Code section: cleanup_globals ### */
/* #### Code section: cleanup_module ### */
/* #### Code section: main_method ### */
/* #### Code section: utility_code_pragmas ### */
#ifdef _MSC_VER
#pragma warning( push )
/* Warning 4127: conditional expression is constant
 * Cython uses constant conditional expressions to allow in inline functions to be optimized at
 * compile-time, so this warning is not useful
 */
#pragma warning( disable : 4127 )
#endif



/* #### Code section: utility_code_def ### */

/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    /* Import `modname` and read its "RefNannyAPI" attribute (an int holding
     * a C pointer).  Returns NULL on any failure, leaving the import or
     * attribute error set. */
    void *api_ptr = NULL;
    PyObject *mod = PyImport_ImportModule(modname);
    if (mod) {
        PyObject *capi = PyObject_GetAttrString(mod, "RefNannyAPI");
        if (capi) {
            api_ptr = PyLong_AsVoidPtr(capi);
            Py_DECREF(capi);
        }
        Py_DECREF(mod);
    }
    return (__Pyx_RefNannyAPIStruct *)api_ptr;
}
#endif

/* ErrOccurredWithGIL */
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
  int err;
  PyGILState_STATE _save = PyGILState_Ensure();
  err = !!PyErr_Occurred();
  PyGILState_Release(_save);
  return err;
}

/* FunctionExport */
static int __Pyx_ExportFunction(PyObject *api_dict, const char *name, void (*f)(void), const char *sig) {
    /* Wrap the function pointer `f` in a capsule named by its signature
     * string `sig` and publish it under `name` in `api_dict`.
     * Returns 0 on success, -1 with an exception set on failure. */
    union {
        void (*fp)(void);
        void *p;
    } caster;                 /* legal way to move a function pointer into void* */
    PyObject *capsule;
    caster.fp = f;
    capsule = PyCapsule_New(caster.p, sig, 0);
    if (!capsule)
        return -1;
    if (PyDict_SetItemString(api_dict, name, capsule) < 0) {
        Py_DECREF(capsule);
        return -1;
    }
    Py_DECREF(capsule);
    return 0;
}

/* GetApiDict */
static PyObject *__Pyx_ApiExport_GetApiDict(void) {
    /* Return a NEW reference to this module's __pyx_capi__ dict, creating
     * and attaching it to the module on first use.  NULL on failure. */
    PyObject *api_dict;
    if (__Pyx_PyDict_GetItemRef(__pyx_mstate_global->__pyx_d, __pyx_mstate_global->__pyx_n_u_pyx_capi, &api_dict) == -1)
        return NULL;
    if (api_dict)
        return api_dict;
    /* Not present yet: create it and expose it as a module attribute. */
    api_dict = PyDict_New();
    if (api_dict && PyObject_SetAttr(__pyx_m, __pyx_mstate_global->__pyx_n_u_pyx_capi, api_dict) == 0)
        return api_dict;
    Py_XDECREF(api_dict);
    return NULL;
}

/* PxdImportShared (used by FunctionImport) */
#ifndef __PYX_HAVE_RT_ImportFromPxd_3_2_2
#define __PYX_HAVE_RT_ImportFromPxd_3_2_2
static int __Pyx_ImportFromPxd_3_2_2(PyObject *module, const char *name, void **p, const char *sig, const char *what) {
    /* Look up `name` in `module.__pyx_capi__`, verify the capsule's signature
     * string matches `sig`, and store the wrapped C pointer in *p.
     * `what` appears only in error messages (e.g. "function").
     * Returns 0 on success, -1 with an exception set on failure. */
    PyObject *d = 0;
    PyObject *cobj = 0;
    d = PyObject_GetAttrString(module, "__pyx_capi__");
    if (!d)
        goto bad;
#if (defined(Py_LIMITED_API) && Py_LIMITED_API >= 0x030d0000) || (!defined(Py_LIMITED_API) && PY_VERSION_HEX >= 0x030d0000)
    /* 3.13+ API: stores a NEW reference (or NULL) into cobj. */
    PyDict_GetItemStringRef(d, name, &cobj);
#else
    /* Older API returns a borrowed reference; take our own. */
    cobj = PyDict_GetItemString(d, name);
    Py_XINCREF(cobj);
#endif
    if (!cobj) {
        /* NOTE(review): a lookup *error* (not just a missing key) is also
         * reported as this ImportError, replacing the original exception. */
        PyErr_Format(PyExc_ImportError,
            "%.200s does not export expected C %.8s %.200s",
                PyModule_GetName(module), what, name);
        goto bad;
    }
    if (!PyCapsule_IsValid(cobj, sig)) {
        PyErr_Format(PyExc_TypeError,
            "C %.8s %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
             what, PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj));
        goto bad;
    }
    *p = PyCapsule_GetPointer(cobj, sig);
    if (!(*p))
        goto bad;
    Py_DECREF(d);
    Py_DECREF(cobj);
    return 0;
bad:
    Py_XDECREF(d);
    Py_XDECREF(cobj);
    return -1;
}
#endif

/* FunctionImport */
#ifndef __PYX_HAVE_RT_ImportFunction_3_2_2
#define __PYX_HAVE_RT_ImportFunction_3_2_2
static int __Pyx_ImportFunction_3_2_2(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
    /* Import a C function pointer exported by another Cython module via its
     * __pyx_capi__ capsule table.  The union converts the capsule's void*
     * back into a function pointer without UB.  Returns 0 on success. */
    union {
        void (*fp)(void);
        void *p;
    } caster;
    int rc = __Pyx_ImportFromPxd_3_2_2(module, funcname, &caster.p, sig, "function");
    if (rc == 0)
        *f = caster.fp;
    return rc;
}
#endif

/* dict_setdefault (used by CLineInTraceback) */
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value) {
    /* Equivalent of d.setdefault(key, default_value).  Always returns a NEW
     * reference to the resulting value, or NULL with an exception set. */
    PyObject* value;
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX >= 0x030C0000
    /* Limited API 3.12+: vectorcall the "setdefault" method. */
    PyObject *args[] = {d, key, default_value};
    value = PyObject_VectorcallMethod(__pyx_mstate_global->__pyx_n_u_setdefault, args, 3 | PY_VECTORCALL_ARGUMENTS_OFFSET, NULL);
#elif CYTHON_COMPILING_IN_LIMITED_API
    value = PyObject_CallMethodObjArgs(d, __pyx_mstate_global->__pyx_n_u_setdefault, key, default_value, NULL);
#elif PY_VERSION_HEX >= 0x030d0000
    /* 3.13+: stores a new reference into `value` (NULL on error), so no
     * extra INCREF is required here. */
    PyDict_SetDefaultRef(d, key, default_value, &value);
#else
    /* PyDict_SetDefault returns a borrowed reference; upgrade it. */
    value = PyDict_SetDefault(d, key, default_value);
    if (unlikely(!value)) return NULL;
    Py_INCREF(value);
#endif
    return value;
}

/* PyDictVersioning (used by CLineInTraceback) */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
    /* Version tag of the type's tp_dict, or 0 if the type has no dict. */
    PyObject *type_dict = Py_TYPE(obj)->tp_dict;
    if (unlikely(!type_dict))
        return 0;
    return __PYX_GET_DICT_VERSION(type_dict);
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
    /* Version tag of obj's instance __dict__, or 0 if it has none. */
    PyObject **dictptr = NULL;
    Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
    if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
        /* Positive offset: the dict slot sits at a fixed position inside the
         * object; negative offsets need the generic CPython helper. */
        dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
        dictptr = _PyObject_GetDictPtr(obj);
#endif
    }
    return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
    /* True iff both the type-dict and instance-dict version tags still match
     * the cached values (i.e. no attribute has changed since caching). */
    PyObject *type_dict = Py_TYPE(obj)->tp_dict;
    if (unlikely(!type_dict))
        return 0;
    if (unlikely(__PYX_GET_DICT_VERSION(type_dict) != tp_dict_version))
        return 0;
    return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif

/* PyErrExceptionMatches (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    /* Does exc_type match any entry of `tuple`?  Two passes: a cheap
     * identity scan first, then the full subclass check. */
    Py_ssize_t idx;
    const Py_ssize_t size = PyTuple_GET_SIZE(tuple);
    for (idx = 0; idx < size; idx++) {
        if (PyTuple_GET_ITEM(tuple, idx) == exc_type)
            return 1;
    }
    for (idx = 0; idx < size; idx++) {
        if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, idx)))
            return 1;
    }
    return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
    /* Fast-path PyErr_ExceptionMatches() reading the thread state directly.
     * Returns 1 if the currently raised exception matches `err` (a class or
     * tuple of classes), 0 otherwise (including when nothing is raised). */
    int result;
    PyObject *exc_type;
#if PY_VERSION_HEX >= 0x030C00A6
    /* 3.12+: only the exception instance is stored; derive its type. */
    PyObject *current_exception = tstate->current_exception;
    if (unlikely(!current_exception)) return 0;
    exc_type = (PyObject*) Py_TYPE(current_exception);
    if (exc_type == err) return 1;
#else
    exc_type = tstate->curexc_type;
    if (exc_type == err) return 1;
    if (unlikely(!exc_type)) return 0;
#endif
    #if CYTHON_AVOID_BORROWED_REFS
    Py_INCREF(exc_type);
    #endif
    if (unlikely(PyTuple_Check(err))) {
        result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
    } else {
        result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
    }
    #if CYTHON_AVOID_BORROWED_REFS
    Py_DECREF(exc_type);
    #endif
    return result;
}
#endif

/* PyErrFetchRestore (used by PyObjectGetAttrStrNoError) */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
    /* Restore a previously fetched exception triple into `tstate`, stealing
     * all three references (mirrors PyErr_Restore). */
#if PY_VERSION_HEX >= 0x030C00A6
    /* 3.12+ keeps only the exception instance: attach the traceback to it,
     * then drop the now-redundant type and tb references. */
    PyObject *tmp_value;
    assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value)));
    if (value) {
        #if CYTHON_COMPILING_IN_CPYTHON
        /* Skip the call when the traceback is already attached. */
        if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb))
        #endif
            PyException_SetTraceback(value, tb);
    }
    tmp_value = tstate->current_exception;
    tstate->current_exception = value;
    Py_XDECREF(tmp_value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
#else
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#endif
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
    /* Fetch-and-clear the current exception triple from `tstate` into the
     * output pointers, transferring ownership to the caller (mirrors
     * PyErr_Fetch).  All three outputs may be set to NULL. */
#if PY_VERSION_HEX >= 0x030C00A6
    /* 3.12+ stores only the exception instance; rebuild type and traceback. */
    PyObject* exc_value;
    exc_value = tstate->current_exception;
    tstate->current_exception = 0;
    *value = exc_value;
    *type = NULL;
    *tb = NULL;
    if (exc_value) {
        *type = (PyObject*) Py_TYPE(exc_value);
        Py_INCREF(*type);
        #if CYTHON_COMPILING_IN_CPYTHON
        *tb = ((PyBaseExceptionObject*) exc_value)->traceback;
        Py_XINCREF(*tb);
        #else
        *tb = PyException_GetTraceback(exc_value);
        #endif
    }
#else
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#endif
}
#endif

/* PyObjectGetAttrStr (used by PyObjectGetAttrStrNoError) */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    /* Attribute lookup that calls the type's tp_getattro slot directly when
     * present, falling back to the generic PyObject_GetAttr. */
    getattrofunc getter = Py_TYPE(obj)->tp_getattro;
    if (likely(getter))
        return getter(obj, attr_name);
    return PyObject_GetAttr(obj, attr_name);
}
#endif

/* PyObjectGetAttrStrNoError (used by CLineInTraceback) */
#if __PYX_LIMITED_VERSION_HEX < 0x030d0000
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
    /* After a failed attribute lookup: swallow the exception only if it is
     * an AttributeError; anything else stays set for the caller. */
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
        __Pyx_PyErr_Clear();
}
#endif
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
    /* getattr(obj, attr_name) that returns NULL *without* leaving an
     * AttributeError set when the attribute is missing; other errors
     * propagate normally. */
    PyObject *result;
#if __PYX_LIMITED_VERSION_HEX >= 0x030d0000
    /* 3.13+ has a dedicated "get or NULL without AttributeError" API. */
    (void) PyObject_GetOptionalAttr(obj, attr_name, &result);
    return result;
#else
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
        /* Internal fast path; the trailing 1 suppresses AttributeError. */
        return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
    }
#endif
    result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
    if (unlikely(!result)) {
        __Pyx_PyObject_GetAttrStr_ClearAttributeError();
    }
    return result;
#endif
}

/* CLineInTraceback (used by AddTraceback) */
#if CYTHON_CLINE_IN_TRACEBACK && CYTHON_CLINE_IN_TRACEBACK_RUNTIME
/* __Pyx_PyProbablyModule_GetDict(o): the module-like object's __dict__ as a
 * NEW reference, or NULL on failure. */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030A0000
#define __Pyx_PyProbablyModule_GetDict(o) __Pyx_XNewRef(PyModule_GetDict(o))
#elif !CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
/* Fix: dropped a stray trailing ';' from this macro's expansion.  It only
 * compiled because the sole call site is a statement supplying its own ';';
 * any expression-context use would have broken. */
#define __Pyx_PyProbablyModule_GetDict(o) PyObject_GenericGetDict(o, NULL)
#else
/* NOTE(review): defined without `static`, so the symbol has external
 * linkage -- confirm nothing else links against it before narrowing. */
PyObject* __Pyx_PyProbablyModule_GetDict(PyObject *o) {
    PyObject **dict_ptr = _PyObject_GetDictPtr(o);
    return dict_ptr ? __Pyx_XNewRef(*dict_ptr) : NULL;
}
#endif
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) {
    /* Decide whether the C source line `c_line` should appear in generated
     * tracebacks.  Controlled at runtime by cython_runtime.cline_in_traceback,
     * which is initialized to False here via setdefault (so the default is to
     * suppress C lines and return 0). */
    PyObject *use_cline = NULL;
    PyObject *ptype, *pvalue, *ptraceback;
    PyObject *cython_runtime_dict;
    CYTHON_MAYBE_UNUSED_VAR(tstate);
    if (unlikely(!__pyx_mstate_global->__pyx_cython_runtime)) {
        return c_line;
    }
    /* Preserve any in-flight exception across the dict operations below. */
    __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
    cython_runtime_dict = __Pyx_PyProbablyModule_GetDict(__pyx_mstate_global->__pyx_cython_runtime);
    if (likely(cython_runtime_dict)) {
        __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
            use_cline, cython_runtime_dict,
            __Pyx_PyDict_SetDefault(cython_runtime_dict, __pyx_mstate_global->__pyx_n_u_cline_in_traceback, Py_False))
    }
    /* Anything not truthy -- or any lookup failure -- disables C lines.  An
     * error from PyObject_Not is deliberately discarded by the restore below. */
    if (use_cline == NULL || use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
        c_line = 0;
    }
    Py_XDECREF(use_cline);
    Py_XDECREF(cython_runtime_dict);
    __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
    return c_line;
}
#endif

/* CodeObjectCache (used by AddTraceback) */
/* Binary search (lower bound) over the sorted code-object cache.
 * Returns the index of the entry whose code_line equals `code_line`, or
 * the index at which such an entry would need to be inserted to keep
 * `entries` sorted (== count when code_line is greater than all keys).
 * Unlike the previous open-coded search, this never dereferences
 * entries[0] when count == 0: the old version read entries[mid] after a
 * loop that never ran, an out-of-bounds access for an empty table. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int lo = 0, hi = count;
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;  /* overflow-safe midpoint */
        if (entries[mid].code_line < code_line) {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    return lo;
}
/* Look up a cached code object by its cache key (a line number; C lines
 * are stored negated by the callers).  Returns a NEW reference, or NULL on
 * a miss.  A key of 0 means "no line information" and is never cached. */
static __Pyx_CachedCodeObjectType *__pyx__find_code_object(struct __Pyx_CodeObjectCache *code_cache, int code_line) {
    __Pyx_CachedCodeObjectType* code_object;
    int pos;
    if (unlikely(!code_line) || unlikely(!code_cache->entries)) {
        return NULL;
    }
    pos = __pyx_bisect_code_objects(code_cache->entries, code_cache->count, code_line);
    if (unlikely(pos >= code_cache->count) || unlikely(code_cache->entries[pos].code_line != code_line)) {
        return NULL;
    }
    code_object = code_cache->entries[pos].code_object;
    Py_INCREF(code_object);
    return code_object;
}
/* Thread-safety wrapper around __pyx__find_code_object().
 * On free-threading CPython, readers register via an atomic accessor
 * count; a negative count means a writer holds the cache, in which case
 * the lookup simply reports a miss.  Builds without atomics always miss. */
static __Pyx_CachedCodeObjectType *__pyx_find_code_object(int code_line) {
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && !CYTHON_ATOMICS
    (void)__pyx__find_code_object;
    return NULL; // Most implementation should have atomics. But otherwise, don't make it thread-safe, just miss.
#else
    struct __Pyx_CodeObjectCache *code_cache = &__pyx_mstate_global->__pyx_code_cache;
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_nonatomic_int_type old_count = __pyx_atomic_incr_acq_rel(&code_cache->accessor_count);
    if (old_count < 0) {
        /* A writer (INT_MIN marker) is active: back out and miss. */
        __pyx_atomic_decr_acq_rel(&code_cache->accessor_count);
        return NULL;
    }
#endif
    __Pyx_CachedCodeObjectType *result = __pyx__find_code_object(code_cache, code_line);
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_atomic_decr_acq_rel(&code_cache->accessor_count);
#endif
    return result;
#endif
}
/* Insert (or replace) a code object in the sorted cache, keyed by
 * code_line.  The cache owns one reference to each stored object.
 * Allocation or growth failures are silently ignored -- caching is
 * strictly best-effort. */
static void __pyx__insert_code_object(struct __Pyx_CodeObjectCache *code_cache, int code_line, __Pyx_CachedCodeObjectType* code_object)
{
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = code_cache->entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* First insertion: allocate the initial 64-entry table. */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            code_cache->entries = entries;
            code_cache->max_count = 64;
            code_cache->count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(code_cache->entries, code_cache->count, code_line);
    if ((pos < code_cache->count) && unlikely(code_cache->entries[pos].code_line == code_line)) {
        /* Key already present: swap in the new code object. */
        __Pyx_CachedCodeObjectType* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_INCREF(code_object);
        Py_DECREF(tmp);
        return;
    }
    if (code_cache->count == code_cache->max_count) {
        /* Grow linearly by 64 entries; on realloc failure keep the old
         * table and drop this insertion. */
        int new_max = code_cache->max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            code_cache->entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        code_cache->entries = entries;
        code_cache->max_count = new_max;
    }
    /* Shift the tail up one slot so the array stays sorted. */
    for (i=code_cache->count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    code_cache->count++;
    Py_INCREF(code_object);
}
/* Writer-side wrapper for __pyx__insert_code_object().
 * On free-threading CPython the writer claims exclusive access by CAS-ing
 * the accessor count from 0 to INT_MIN; if any reader or another writer is
 * active, the insertion is simply skipped (best-effort cache). */
static void __pyx_insert_code_object(int code_line, __Pyx_CachedCodeObjectType* code_object) {
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING && !CYTHON_ATOMICS
    (void)__pyx__insert_code_object;
    return; // Most implementation should have atomics. But otherwise, don't make it thread-safe, just fail.
#else
    struct __Pyx_CodeObjectCache *code_cache = &__pyx_mstate_global->__pyx_code_cache;
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    __pyx_nonatomic_int_type expected = 0;
    if (!__pyx_atomic_int_cmp_exchange(&code_cache->accessor_count, &expected, INT_MIN)) {
        return;
    }
#endif
    __pyx__insert_code_object(code_cache, code_line, code_object);
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
    /* Release the writer marker set above. */
    __pyx_atomic_sub(&code_cache->accessor_count, INT_MIN);
#endif
#endif
}

/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API && !defined(PYPY_VERSION)
  #ifndef Py_BUILD_CORE
    #define Py_BUILD_CORE 1
  #endif
  #include "internal/pycore_frame.h"
#endif
#if CYTHON_COMPILING_IN_LIMITED_API
/* Limited-API substitute for mutating a code object in place: calls
 * code.replace(co_firstlineno=..., co_name=...) using `scratch_dict` as
 * the keyword-argument dict.  Returns a new reference to the replaced
 * code object, or NULL on failure (a missing `replace` attribute clears
 * the error state before returning NULL). */
static PyObject *__Pyx_PyCode_Replace_For_AddTraceback(PyObject *code, PyObject *scratch_dict,
                                                       PyObject *firstlineno, PyObject *name) {
    PyObject *replace = NULL;
    if (unlikely(PyDict_SetItemString(scratch_dict, "co_firstlineno", firstlineno))) return NULL;
    if (unlikely(PyDict_SetItemString(scratch_dict, "co_name", name))) return NULL;
    replace = PyObject_GetAttrString(code, "replace");
    if (likely(replace)) {
        PyObject *result = PyObject_Call(replace, __pyx_mstate_global->__pyx_empty_tuple, scratch_dict);
        Py_DECREF(replace);
        return result;
    }
    PyErr_Clear();
    return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    /* Limited-API variant: append a traceback frame for generated code.
     * Builds (and caches) a fake code object via compile()+replace(), then
     * evaluates sys._getframe() under that code object to obtain a frame
     * for PyTraceBack_Here().  The pending exception is preserved across
     * the whole operation; any internal failure aborts silently. */
    PyObject *code_object = NULL, *py_py_line = NULL, *py_funcname = NULL, *dict = NULL;
    PyObject *getframe = NULL, *frame = NULL;
    PyObject *exc_type, *exc_value, *exc_traceback;
    int success = 0;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line);
    }
    PyErr_Fetch(&exc_type, &exc_value, &exc_traceback);
    /* C lines are cached under their negated value to keep them distinct
     * from Python line numbers. */
    code_object = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!code_object) {
        code_object = Py_CompileString("_getframe()", filename, Py_eval_input);
        if (unlikely(!code_object)) goto bad;
        py_py_line = PyLong_FromLong(py_line);
        if (unlikely(!py_py_line)) goto bad;
        if (c_line) {
            py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        } else {
            py_funcname = PyUnicode_FromString(funcname);
        }
        if (unlikely(!py_funcname)) goto bad;
        dict = PyDict_New();
        if (unlikely(!dict)) goto bad;
        {
            PyObject *old_code_object = code_object;
            code_object = __Pyx_PyCode_Replace_For_AddTraceback(code_object, dict, py_py_line, py_funcname);
            Py_DECREF(old_code_object);
        }
        if (unlikely(!code_object)) goto bad;
        __pyx_insert_code_object(c_line ? -c_line : py_line, code_object);
    } else {
        dict = PyDict_New();
        /* BUGFIX: this path previously used `dict` without a NULL check;
         * PyDict_SetItemString(NULL, ...) below would crash on OOM. */
        if (unlikely(!dict)) goto bad;
    }
    getframe = PySys_GetObject("_getframe");
    if (unlikely(!getframe)) goto bad;
    if (unlikely(PyDict_SetItemString(dict, "_getframe", getframe))) goto bad;
    frame = PyEval_EvalCode(code_object, dict, dict);
    if (unlikely(!frame) || frame == Py_None) goto bad;
    success = 1;
  bad:
    PyErr_Restore(exc_type, exc_value, exc_traceback);
    Py_XDECREF(code_object);
    Py_XDECREF(py_py_line);
    Py_XDECREF(py_funcname);
    Py_XDECREF(dict);
    if (success) {
        PyTraceBack_Here(
            (struct _frame*)frame);
    }
    Py_XDECREF(frame);
}
#else
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    /* Build an empty code object carrying the filename/function/line that
     * should show up in the traceback.  When a C line is known, the
     * function name is decorated as "funcname (cfile:line)". */
    PyCodeObject *py_code = NULL;
    PyObject *py_funcname = NULL;
    if (c_line) {
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        if (!py_funcname) goto bad;
        /* The UTF-8 buffer stays valid as long as py_funcname is alive,
         * which covers the PyCode_NewEmpty() call below. */
        funcname = PyUnicode_AsUTF8(py_funcname);
        if (!funcname) goto bad;
    }
    py_code = PyCode_NewEmpty(filename, funcname, py_line);
    Py_XDECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_funcname);
    return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    /* Append a traceback frame pointing at (filename, funcname, py_line).
     * Uses a cached fake code object when available; C lines are cached
     * under their negated value to keep them distinct from Python line
     * numbers.  The pending exception is preserved while the code object
     * is created. */
    PyCodeObject *py_code = 0;
    PyFrameObject *py_frame = 0;
    PyThreadState *tstate = __Pyx_PyThreadState_Current;
    PyObject *ptype, *pvalue, *ptraceback;
    if (c_line) {
        c_line = __Pyx_CLineForTraceback(tstate, c_line);
    }
    py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
    if (!py_code) {
        __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) {
            /* If the code object creation fails, then we should clear the
               fetched exception references and propagate the new exception */
            Py_XDECREF(ptype);
            Py_XDECREF(pvalue);
            Py_XDECREF(ptraceback);
            goto bad;
        }
        __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
        __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
    }
    py_frame = PyFrame_New(
        tstate,            /*PyThreadState *tstate,*/
        py_code,           /*PyCodeObject *code,*/
        __pyx_mstate_global->__pyx_d,    /*PyObject *globals,*/
        0                  /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
#endif

/* FormatTypeName */
#if CYTHON_COMPILING_IN_LIMITED_API && __PYX_LIMITED_VERSION_HEX < 0x030d0000
/* Return "module.qualname" for a type, or just "qualname" for builtins.
 * On any failure the error state is cleared and a best-effort name (the
 * qualname if already obtained, otherwise a placeholder string constant)
 * is returned, so this is safe to call while formatting another error. */
static __Pyx_TypeName
__Pyx_PyType_GetFullyQualifiedName(PyTypeObject* tp)
{
    PyObject *module = NULL, *name = NULL, *result = NULL;
    #if __PYX_LIMITED_VERSION_HEX < 0x030b0000
    name = __Pyx_PyObject_GetAttrStr((PyObject *)tp,
                                               __pyx_mstate_global->__pyx_n_u_qualname);
    #else
    name = PyType_GetQualName(tp);
    #endif
    if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) goto bad;
    module = __Pyx_PyObject_GetAttrStr((PyObject *)tp,
                                               __pyx_mstate_global->__pyx_n_u_module);
    if (unlikely(module == NULL) || unlikely(!PyUnicode_Check(module))) goto bad;
    if (PyUnicode_CompareWithASCIIString(module, "builtins") == 0) {
        /* Builtin types print without the module prefix. */
        result = name;
        name = NULL;
        goto done;
    }
    result = PyUnicode_FromFormat("%U.%U", module, name);
    if (unlikely(result == NULL)) goto bad;
  done:
    Py_XDECREF(name);
    Py_XDECREF(module);
    return result;
  bad:
    PyErr_Clear();
    if (name) {
        result = name;
        name = NULL;
    } else {
        result = __Pyx_NewRef(__pyx_mstate_global->__pyx_kp_u_);
    }
    goto done;
}
#endif

/* PyObjectCall (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
/* Call `func` with positional tuple `arg` and keyword dict `kw` by going
 * through the type's tp_call slot directly.  Mirrors PyObject_Call(),
 * including the recursion-depth guard and the safety net for slots that
 * return NULL without setting an exception. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    ternaryfunc slot = Py_TYPE(func)->tp_call;
    PyObject *ret;
    if (unlikely(slot == NULL))
        return PyObject_Call(func, arg, kw);  /* let CPython raise "not callable" */
    if (unlikely(Py_EnterRecursiveCall(" while calling a Python object")))
        return NULL;
    ret = slot(func, arg, kw);
    Py_LeaveRecursiveCall();
    if (unlikely(ret == NULL) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return ret;
}
#endif

/* PyObjectCallMethO (used by PyObjectFastCall) */
#if CYTHON_COMPILING_IN_CPYTHON
/* Invoke a built-in/cython function object directly through its stored
 * PyCFunction pointer (callers gate this on METH_NOARGS/METH_O), with the
 * usual recursion guard and the "NULL result without error" safety net. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
    PyObject *self, *result;
    PyCFunction cfunc;
    cfunc = __Pyx_CyOrPyCFunction_GET_FUNCTION(func);
    self = __Pyx_CyOrPyCFunction_GET_SELF(func);
    if (unlikely(Py_EnterRecursiveCall(" while calling a Python object")))
        return NULL;
    result = cfunc(self, arg);
    Py_LeaveRecursiveCall();
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif

/* PyObjectFastCall (used by PyObjectVectorCallKwBuilder) */
#if PY_VERSION_HEX < 0x03090000 || CYTHON_COMPILING_IN_LIMITED_API
/* Fallback for targets without PyObject_VectorcallDict: pack the C array
 * of arguments into a fresh tuple and perform a regular call. */
static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject * const*args, size_t nargs, PyObject *kwargs) {
    PyObject *argstuple;
    PyObject *result = 0;
    size_t i;
    argstuple = PyTuple_New((Py_ssize_t)nargs);
    if (unlikely(!argstuple)) return NULL;
    for (i = 0; i < nargs; i++) {
        Py_INCREF(args[i]);
        /* SET_ITEM consumes the reference taken just above. */
        if (__Pyx_PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]) != (0)) goto bad;
    }
    result = __Pyx_PyObject_Call(func, argstuple, kwargs);
  bad:
    Py_DECREF(argstuple);
    return result;
}
#endif
#if CYTHON_VECTORCALL && !CYTHON_COMPILING_IN_LIMITED_API
/* Resolve the vectorcall function pointer of `callable`, or NULL when its
 * type does not support vectorcall.  Dispatches to the private (pre-3.9)
 * or public CPython helper where available, otherwise reads the slot
 * directly from the object. */
  #if PY_VERSION_HEX < 0x03090000
    #define __Pyx_PyVectorcall_Function(callable) _PyVectorcall_Function(callable)
  #elif CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE vectorcallfunc __Pyx_PyVectorcall_Function(PyObject *callable) {
    PyTypeObject *tp = Py_TYPE(callable);
    #if defined(__Pyx_CyFunction_USED)
    if (__Pyx_CyFunction_CheckExact(callable)) {
        return __Pyx_CyFunction_func_vectorcall(callable);
    }
    #endif
    if (!PyType_HasFeature(tp, Py_TPFLAGS_HAVE_VECTORCALL)) {
        return NULL;
    }
    assert(PyCallable_Check(callable));
    Py_ssize_t offset = tp->tp_vectorcall_offset;
    assert(offset > 0);
    vectorcallfunc ptr;
    /* memcpy rather than a pointer cast: avoids strict-aliasing and
     * alignment concerns when extracting the function pointer. */
    memcpy(&ptr, (char *) callable + offset, sizeof(ptr));
    return ptr;
}
  #else
    #define __Pyx_PyVectorcall_Function(callable) PyVectorcall_Function(callable)
  #endif
#endif
/* Call `func` with `args`/`_nargs` (vectorcall convention) and an optional
 * keyword dict `kwargs`, choosing the fastest available mechanism:
 * direct METH_NOARGS/METH_O dispatch, vectorcall, PyObject_VectorcallDict,
 * or the tuple-based fallback. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject *const *args, size_t _nargs, PyObject *kwargs) {
    Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs);
#if CYTHON_COMPILING_IN_CPYTHON
    /* Shortcut for 0/1-argument C function objects without keywords. */
    if (nargs == 0 && kwargs == NULL) {
        if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_NOARGS))
            return __Pyx_PyObject_CallMethO(func, NULL);
    }
    else if (nargs == 1 && kwargs == NULL) {
        if (__Pyx_CyOrPyCFunction_Check(func) && likely( __Pyx_CyOrPyCFunction_GET_FLAGS(func) & METH_O))
            return __Pyx_PyObject_CallMethO(func, args[0]);
    }
#endif
    if (kwargs == NULL) {
        #if CYTHON_VECTORCALL
          #if CYTHON_COMPILING_IN_LIMITED_API
            return PyObject_Vectorcall(func, args, _nargs, NULL);
          #else
            vectorcallfunc f = __Pyx_PyVectorcall_Function(func);
            if (f) {
                return f(func, args, _nargs, NULL);
            }
          #endif
        #endif
    }
    if (nargs == 0) {
        return __Pyx_PyObject_Call(func, __pyx_mstate_global->__pyx_empty_tuple, kwargs);
    }
    #if PY_VERSION_HEX >= 0x03090000 && !CYTHON_COMPILING_IN_LIMITED_API
    return PyObject_VectorcallDict(func, args, (size_t)nargs, kwargs);
    #else
    return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs);
    #endif
}

/* PyObjectVectorCallKwBuilder (used by CIntToPy) */
#if CYTHON_VECTORCALL
/* Keyword-argument builder for vectorcall sites: `builder` is the kwnames
 * tuple and `args` points at the trailing slots of the argument array;
 * slot `n` receives key/value.  The value pointer is stored as-is --
 * presumably the caller retains its reference until the call is made
 * (NOTE(review): confirm against the call-builder protocol). */
static int __Pyx_VectorcallBuilder_AddArg(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
    (void)__Pyx_PyObject_FastCallDict;
    if (__Pyx_PyTuple_SET_ITEM(builder, n, key) != (0)) return -1;
    Py_INCREF(key);
    args[n] = value;
    return 0;
}
/* As above, but first validates that the key is a str object. */
CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
    (void)__Pyx_VectorcallBuilder_AddArgStr;
    if (unlikely(!PyUnicode_Check(key))) {
        PyErr_SetString(PyExc_TypeError, "keywords must be strings");
        return -1;
    }
    return __Pyx_VectorcallBuilder_AddArg(key, value, builder, args, n);
}
/* As above, taking a C string key (converted to a new str object). */
static int __Pyx_VectorcallBuilder_AddArgStr(const char *key, PyObject *value, PyObject *builder, PyObject **args, int n) {
    PyObject *pyKey = PyUnicode_FromString(key);
    if (!pyKey) return -1;
    return __Pyx_VectorcallBuilder_AddArg(pyKey, value, builder, args, n);
}
#else // CYTHON_VECTORCALL
/* Non-vectorcall builds: `builder` is a plain keyword dict. */
CYTHON_UNUSED static int __Pyx_VectorcallBuilder_AddArg_Check(PyObject *key, PyObject *value, PyObject *builder, CYTHON_UNUSED PyObject **args, CYTHON_UNUSED int n) {
    if (unlikely(!PyUnicode_Check(key))) {
        PyErr_SetString(PyExc_TypeError, "keywords must be strings");
        return -1;
    }
    return PyDict_SetItem(builder, key, value);
}
#endif

/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyLong_From_long(long value) {
    /* Convert a C `long` to a Python int.  The sizeof() comparisons
     * collapse at compile time to pick the narrowest matching CPython
     * constructor (this function body is a generic template Cython reuses
     * for many integer types, hence the tautological-looking branches);
     * the trailing block handles the normally-unreachable case where no
     * fixed-size constructor fits, going through the raw bytes. */
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time detection of unsigned source types. */
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
#if !CYTHON_COMPILING_IN_PYPY
        } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
            return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyLong_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
            return PyLong_FromLongLong((PY_LONG_LONG) value);
        }
    }
    {
        /* Fallback: construct the int from the native byte representation. */
        unsigned char *bytes = (unsigned char *)&value;
#if !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d00A4
        if (is_unsigned) {
            return PyLong_FromUnsignedNativeBytes(bytes, sizeof(value), -1);
        } else {
            return PyLong_FromNativeBytes(bytes, sizeof(value), -1);
        }
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030d0000
        /* Runtime endianness probe. */
        int one = 1; int little = (int)*(unsigned char *)&one;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
#else
        /* Limited API: call int.from_bytes(bytes, byteorder, signed=...) */
        int one = 1; int little = (int)*(unsigned char *)&one;
        PyObject *from_bytes, *result = NULL, *kwds = NULL;
        PyObject *py_bytes = NULL, *order_str = NULL;
        from_bytes = PyObject_GetAttrString((PyObject*)&PyLong_Type, "from_bytes");
        if (!from_bytes) return NULL;
        py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(long));
        if (!py_bytes) goto limited_bad;
        order_str = PyUnicode_FromString(little ? "little" : "big");
        if (!order_str) goto limited_bad;
        {
            PyObject *args[3+(CYTHON_VECTORCALL ? 1 : 0)] = { NULL, py_bytes, order_str };
            if (!is_unsigned) {
                kwds = __Pyx_MakeVectorcallBuilderKwds(1);
                if (!kwds) goto limited_bad;
                if (__Pyx_VectorcallBuilder_AddArgStr("signed", __Pyx_NewRef(Py_True), kwds, args+3, 0) < 0) goto limited_bad;
            }
            result = __Pyx_Object_Vectorcall_CallFromBuilder(from_bytes, args+1, 2 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET, kwds);
        }
        limited_bad:
        Py_XDECREF(kwds);
        Py_XDECREF(order_str);
        Py_XDECREF(py_bytes);
        Py_XDECREF(from_bytes);
        return result;
#endif
    }
}

/* CIntFromPyVerify (used by CIntFromPy) */
/* Verify that a value obtained as `func_type` round-trips through
 * `target_type` and return it from the enclosing function; on mismatch,
 * jump to the enclosing function's raise_overflow / raise_neg_overflow
 * labels (`is_unsigned` must also be in scope).  The _EXC variant first
 * propagates an error from a CPython conversion function that signals
 * failure by returning (func_type)-1 with an exception set. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
    __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
    {\
        func_type value = func_value;\
        if (sizeof(target_type) < sizeof(func_type)) {\
            if (unlikely(value != (func_type) (target_type) value)) {\
                func_type zero = 0;\
                if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
                    return (target_type) -1;\
                if (is_unsigned && unlikely(value < zero))\
                    goto raise_neg_overflow;\
                else\
                    goto raise_overflow;\
            }\
        }\
        return (target_type) value;\
    }

/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyLong_As_long(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const long neg_one = (long) -1, const_zero = (long) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        long val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyLong_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) {
                            return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) {
                            return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) {
                            return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (long) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(long) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) {
                            return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) {
                            return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) {
                            return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) {
                            return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) {
                            return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) {
                            return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(long) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
        } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        long val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (long) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (long) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (long) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (long) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((long) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((long) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (long) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to long");
    return (long) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to long");
    return (long) -1;
}

/* CIntFromPy */
/* Convert a Python object to a C 'int'.
 * Non-int inputs are coerced via __Pyx_PyNumber_Long() first.  For real int
 * objects, three strategies are tried in order:
 *   1. direct access to the PyLong digit array (CYTHON_USE_PYLONG_INTERNALS),
 *      handling compact values and small digit counts inline;
 *   2. the PyLong_AsLong/PyLong_AsUnsignedLong family when 'int' fits;
 *   3. a byte-copy fallback (PyLong_AsNativeBytes on 3.13+, the private
 *      _PyLong_AsByteArray, or a portable mask-and-shift loop).
 * Returns (int)-1 with an exception set on error; OverflowError when the
 * value does not fit. */
static CYTHON_INLINE int __Pyx_PyLong_As_int(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
    const int neg_one = (int) -1, const_zero = (int) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
    /* Compile-time detection of whether the target type is unsigned. */
    const int is_unsigned = neg_one > const_zero;
    if (unlikely(!PyLong_Check(x))) {
        /* Not an int: coerce (may call __int__/__index__) and recurse once. */
        int val;
        PyObject *tmp = __Pyx_PyNumber_Long(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyLong_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
    if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
        if (unlikely(__Pyx_PyLong_IsNeg(x))) {
            goto raise_neg_overflow;
        } else if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x))
        } else {
            /* Multi-digit positive value: reassemble from 15/30-bit digits
               when the result provably fits the intermediate type. */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_DigitCount(x)) {
                case 2:
                    if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) {
                            return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) {
                            return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) {
                            return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
                        }
                    }
                    break;
            }
        }
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7
        if (unlikely(Py_SIZE(x) < 0)) {
            goto raise_neg_overflow;
        }
#else
        {
            /* No Py_SIZE access: detect negativity via a comparison with 0. */
            int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
            if (unlikely(result < 0))
                return (int) -1;
            if (unlikely(result == 1))
                goto raise_neg_overflow;
        }
#endif
        if ((sizeof(int) <= sizeof(unsigned long))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
        } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
        }
    } else {
#if CYTHON_USE_PYLONG_INTERNALS
        if (__Pyx_PyLong_IsCompact(x)) {
            __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x))
        } else {
            /* Signed target: the signed digit count encodes the sign, so
               negative cases mirror the positive ones with negation. */
            const digit* digits = __Pyx_PyLong_Digits(x);
            assert(__Pyx_PyLong_DigitCount(x) > 1);
            switch (__Pyx_PyLong_SignedDigitCount(x)) {
                case -2:
                    if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) {
                            return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 2:
                    if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) {
                            return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -3:
                    if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) {
                            return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 3:
                    if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) {
                            return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case -4:
                    if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) {
                            return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
                case 4:
                    if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) {
                        if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) {
                            __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
                        } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) {
                            return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
                        }
                    }
                    break;
            }
        }
#endif
        if ((sizeof(int) <= sizeof(long))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
        } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) {
            __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
        }
    }
    {
        /* Generic fallback for values that did not fit the fast paths. */
        int val;
        int ret = -1;
#if PY_VERSION_HEX >= 0x030d00A6 && !CYTHON_COMPILING_IN_LIMITED_API
        /* 3.13+: copy the value's native bytes directly; a copy larger than
           sizeof(val) means overflow. */
        Py_ssize_t bytes_copied = PyLong_AsNativeBytes(
            x, &val, sizeof(val), Py_ASNATIVEBYTES_NATIVE_ENDIAN | (is_unsigned ? Py_ASNATIVEBYTES_UNSIGNED_BUFFER | Py_ASNATIVEBYTES_REJECT_NEGATIVE : 0));
        if (unlikely(bytes_copied == -1)) {
        } else if (unlikely(bytes_copied > (Py_ssize_t) sizeof(val))) {
            goto raise_overflow;
        } else {
            ret = 0;
        }
#elif PY_VERSION_HEX < 0x030d0000 && !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray)
        /* Older CPython: private byte-array export in host endianness. */
        int one = 1; int is_little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&val;
        ret = _PyLong_AsByteArray((PyLongObject *)x,
                                    bytes, sizeof(val),
                                    is_little, !is_unsigned);
#else
        /* Portable path: extract chunk_size-bit slices with Python-level
           mask/shift arithmetic and reassemble them into 'val'. */
        PyObject *v;
        PyObject *stepval = NULL, *mask = NULL, *shift = NULL;
        int bits, remaining_bits, is_negative = 0;
        int chunk_size = (sizeof(long) < 8) ? 30 : 62;
        if (likely(PyLong_CheckExact(x))) {
            v = __Pyx_NewRef(x);
        } else {
            v = PyNumber_Long(x);
            if (unlikely(!v)) return (int) -1;
            assert(PyLong_CheckExact(v));
        }
        {
            int result = PyObject_RichCompareBool(v, Py_False, Py_LT);
            if (unlikely(result < 0)) {
                Py_DECREF(v);
                return (int) -1;
            }
            is_negative = result == 1;
        }
        if (is_unsigned && unlikely(is_negative)) {
            Py_DECREF(v);
            goto raise_neg_overflow;
        } else if (is_negative) {
            /* Work on ~v (non-negative) and undo the inversion at the end. */
            stepval = PyNumber_Invert(v);
            Py_DECREF(v);
            if (unlikely(!stepval))
                return (int) -1;
        } else {
            stepval = v;
        }
        v = NULL;
        val = (int) 0;
        mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done;
        shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done;
        for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) {
            PyObject *tmp, *digit;
            long idigit;
            digit = PyNumber_And(stepval, mask);
            if (unlikely(!digit)) goto done;
            idigit = PyLong_AsLong(digit);
            Py_DECREF(digit);
            if (unlikely(idigit < 0)) goto done;
            val |= ((int) idigit) << bits;
            tmp = PyNumber_Rshift(stepval, shift);
            if (unlikely(!tmp)) goto done;
            Py_DECREF(stepval); stepval = tmp;
        }
        Py_DECREF(shift); shift = NULL;
        Py_DECREF(mask); mask = NULL;
        {
            /* Final partial chunk: anything beyond the remaining bit budget
               (minus the sign bit for signed targets) is an overflow. */
            long idigit = PyLong_AsLong(stepval);
            if (unlikely(idigit < 0)) goto done;
            remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 0 : 1);
            if (unlikely(idigit >= (1L << remaining_bits)))
                goto raise_overflow;
            val |= ((int) idigit) << bits;
        }
        if (!is_unsigned) {
            if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1))))
                goto raise_overflow;
            if (is_negative)
                val = ~val;
        }
        ret = 0;
    done:
        Py_XDECREF(shift);
        Py_XDECREF(mask);
        Py_XDECREF(stepval);
#endif
        if (unlikely(ret))
            return (int) -1;
        return val;
    }
raise_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "value too large to convert to int");
    return (int) -1;
raise_neg_overflow:
    PyErr_SetString(PyExc_OverflowError,
        "can't convert negative value to int");
    return (int) -1;
}

/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
/* Walk the tp_base chain of 'a'; report whether 'b' appears in it.
   Every chain implicitly ends at 'object', so b == PyBaseObject_Type
   always matches. */
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
    for (;;) {
        if (!a)
            return b == &PyBaseObject_Type;
        a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*);
        if (a == b)
            return 1;
    }
}
/* issubclass(a, b) without calling back into Python: scan the MRO tuple
   when one has been computed, otherwise fall back to the base chain. */
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (a == b) return 1;
    mro = a->tp_mro;
    if (unlikely(!mro))
        return __Pyx_InBases(a, b);
    {
        const Py_ssize_t count = PyTuple_GET_SIZE(mro);
        Py_ssize_t idx;
        for (idx = 0; idx < count; idx++) {
            if (PyTuple_GET_ITEM(mro, idx) == (PyObject *)b)
                return 1;
        }
    }
    return 0;
}
/* Like __Pyx_IsSubtype but tests against two candidate bases in a single
   MRO scan: true when 'cls' is a subtype of 'a' or of 'b'. */
static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) {
    PyObject *mro;
    if (cls == a || cls == b) return 1;
    mro = cls->tp_mro;
    if (unlikely(!mro))
        return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b);
    {
        const Py_ssize_t count = PyTuple_GET_SIZE(mro);
        Py_ssize_t idx;
        for (idx = 0; idx < count; idx++) {
            PyObject *entry = PyTuple_GET_ITEM(mro, idx);
            if (entry == (PyObject *)a || entry == (PyObject *)b)
                return 1;
        }
    }
    return 0;
}
/* Dispatch helper: with one candidate type a plain subtype test suffices;
   with two, test both in one pass. */
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
    return exc_type1
        ? __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2)
        : __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
/* Match 'exc_type' against a tuple of exception classes, the way 'except
   (A, B)' does.  A cheap identity pass runs first so that the common case
   avoids any subclass checks; non-class tuple entries are skipped. */
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
    Py_ssize_t idx, count;
    assert(PyExceptionClass_Check(exc_type));
    count = PyTuple_GET_SIZE(tuple);
    for (idx = 0; idx < count; idx++) {
        if (PyTuple_GET_ITEM(tuple, idx) == exc_type) return 1;
    }
    for (idx = 0; idx < count; idx++) {
        PyObject *candidate = PyTuple_GET_ITEM(tuple, idx);
        if (unlikely(!PyExceptionClass_Check(candidate)))
            continue;
        if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, candidate))
            return 1;
    }
    return 0;
}
/* Fast replacement for PyErr_GivenExceptionMatches(): identity first, then
   direct class/tuple handling; anything unusual is delegated to CPython. */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
    if (likely(err == exc_type)) return 1;
    if (likely(PyExceptionClass_Check(err))) {
        if (PyExceptionClass_Check(exc_type))
            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
        if (PyTuple_Check(exc_type))
            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
    }
    return PyErr_GivenExceptionMatches(err, exc_type);
}
/* Two-type variant used for 'except (A, B)' with exactly two known classes.
   Both arguments must already be exception classes (asserted). */
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
    assert(PyExceptionClass_Check(exc_type1));
    assert(PyExceptionClass_Check(exc_type2));
    if (likely(err == exc_type1 || err == exc_type2)) return 1;
    if (unlikely(!PyExceptionClass_Check(err))) {
        /* Not a class (e.g. an instance): let CPython decide. */
        return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
    }
    return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
#endif

/* GetRuntimeVersion */
#if __PYX_LIMITED_VERSION_HEX < 0x030b0000
/* Pre-3.11 fallback: parse the "X.Y.Z..." prefix of Py_GetVersion() into a
   packed integer (major in the top byte, then minor, then micro) and cache
   it in __Pyx_cached_runtime_version.  Idempotent after the first call. */
void __Pyx_init_runtime_version(void) {
    const char *ver_str;
    unsigned long packed = 0;
    unsigned long place = 0x01000000UL;
    int pos = 0;
    if (__Pyx_cached_runtime_version != 0)
        return;
    ver_str = Py_GetVersion();
    while (place) {
        unsigned int component = 0;
        while (ver_str[pos] >= '0' && ver_str[pos] <= '9') {
            component = component * 10 + (unsigned int)(ver_str[pos] - '0');
            ++pos;
        }
        packed += place * component;
        /* Stop at the first non-'.' (e.g. "3.10.12+" or the trailing text). */
        if (ver_str[pos] != '.')
            break;
        place >>= 8;
        ++pos;
    }
    __Pyx_cached_runtime_version = packed;
}
#endif
/* Packed runtime Python version with the lowest byte cleared.
   On 3.11+ this reads the public Py_Version constant directly; older
   runtimes use the value parsed from Py_GetVersion() by
   __Pyx_init_runtime_version() above. */
static unsigned long __Pyx_get_runtime_version(void) {
#if __PYX_LIMITED_VERSION_HEX >= 0x030b0000
    return Py_Version & ~0xFFUL;
#else
    return __Pyx_cached_runtime_version;
#endif
}

/* AddModuleRef */
/* Three variants of "get-or-create module 'name' in sys.modules, returning
   a NEW reference":
   1. free-threaded CPython: a race-safe reimplementation that uses
      PyDict_SetDefaultRef so concurrent callers agree on one module object;
   2. Python 3.13+: the official PyImport_AddModuleRef;
   3. otherwise: legacy PyImport_AddModule (borrowed ref) plus an INCREF. */
#if CYTHON_COMPILING_IN_CPYTHON_FREETHREADING
  static PyObject *__Pyx_PyImport_AddModuleObjectRef(PyObject *name) {
      PyObject *module_dict = PyImport_GetModuleDict();
      PyObject *m;
      if (PyMapping_GetOptionalItem(module_dict, name, &m) < 0) {
          return NULL;
      }
      /* Existing entry that is really a module: hand it back directly. */
      if (m != NULL && PyModule_Check(m)) {
          return m;
      }
      Py_XDECREF(m);
      m = PyModule_NewObject(name);
      if (m == NULL)
          return NULL;
      if (PyDict_CheckExact(module_dict)) {
          /* SetDefaultRef inserts atomically; if another thread won the
             race, new_m is its module and our 'm' is dropped. */
          PyObject *new_m;
          (void)PyDict_SetDefaultRef(module_dict, name, m, &new_m);
          Py_DECREF(m);
          return new_m;
      } else {
           if (PyObject_SetItem(module_dict, name, m) != 0) {
                Py_DECREF(m);
                return NULL;
            }
            return m;
      }
  }
  static PyObject *__Pyx_PyImport_AddModuleRef(const char *name) {
      PyObject *py_name = PyUnicode_FromString(name);
      if (!py_name) return NULL;
      PyObject *module = __Pyx_PyImport_AddModuleObjectRef(py_name);
      Py_DECREF(py_name);
      return module;
  }
#elif __PYX_LIMITED_VERSION_HEX >= 0x030d0000
  #define __Pyx_PyImport_AddModuleRef(name) PyImport_AddModuleRef(name)
#else
  static PyObject *__Pyx_PyImport_AddModuleRef(const char *name) {
      PyObject *module = PyImport_AddModule(name);
      Py_XINCREF(module);
      return module;
  }
#endif

/* CheckBinaryVersion */
/* Compare compile-time vs runtime Python versions on their major.minor
   fields only (the top two bytes of the packed encoding).  Equal versions
   return 0; a newer runtime returns 1 when allow_newer is set; any other
   mismatch emits a (catchable) warning and returns its result. */
static int __Pyx_check_binary_version(unsigned long ct_version, unsigned long rt_version, int allow_newer) {
    const unsigned long MAJOR_MINOR = 0xFFFF0000UL;
    const unsigned long ct_mm = ct_version & MAJOR_MINOR;
    const unsigned long rt_mm = rt_version & MAJOR_MINOR;
    char message[200];
    if (rt_mm == ct_mm)
        return 0;
    if (likely(allow_newer && rt_mm > ct_mm))
        return 1;
    PyOS_snprintf(message, sizeof(message),
                  "compile time Python version %d.%d "
                  "of module '%.100s' "
                  "%s "
                  "runtime version %d.%d",
                   (int) (ct_version >> 24), (int) ((ct_version >> 16) & 0xFF),
                   __Pyx_MODULE_NAME,
                   (allow_newer) ? "was newer than" : "does not match",
                   (int) (rt_version >> 24), (int) ((rt_version >> 16) & 0xFF)
    );
    return PyErr_WarnEx(NULL, message, 1);
}

/* DecompressString */
/* Decompress the 'length' bytes at 's' using the stdlib module selected by
 * 'algo' (3: compression.zstd, 2: bz2, anything else: zlib).  Returns a new
 * reference to the decompressed object, or NULL with an exception set.
 * Fix vs. generated code: on Py_BuildValue() failure in the zstd branch the
 * original did 'return NULL', leaking 'methodname'; 'module' is now
 * NULL-initialized and that failure routes through the common cleanup. */
static PyObject *__Pyx_DecompressString(const char *s, Py_ssize_t length, int algo) {
    PyObject *module = NULL, *decompress, *compressed_bytes, *decompressed;
    const char* module_name = algo == 3 ? "compression.zstd" : algo == 2 ? "bz2" : "zlib";
    PyObject *methodname = PyUnicode_FromString("decompress");
    if (unlikely(!methodname)) return NULL;
    #if __PYX_LIMITED_VERSION_HEX >= 0x030e0000
    if (algo == 3) {
        /* Submodule import needs a fromlist so the leaf module is returned. */
        PyObject *fromlist = Py_BuildValue("[O]", methodname);
        if (unlikely(!fromlist)) goto bad;  /* was 'return NULL': leaked methodname */
        module = PyImport_ImportModuleLevel("compression.zstd", NULL, NULL, fromlist, 0);
        Py_DECREF(fromlist);
    } else
    #endif
        module = PyImport_ImportModule(module_name);
    if (unlikely(!module)) goto import_failed;
    decompress = PyObject_GetAttr(module, methodname);
    if (unlikely(!decompress)) goto import_failed;
    {
        /* Wrap the (read-only) input bytes in a zero-copy memoryview.
           The const is cast away only because PyMemoryView_FromMemory takes
           a non-const pointer; PyBUF_READ guarantees no writes. */
        #ifdef __cplusplus
            char *memview_bytes = const_cast<char*>(s);
        #else
            #if defined(__clang__)
              #pragma clang diagnostic push
              #pragma clang diagnostic ignored "-Wcast-qual"
            #elif !defined(__INTEL_COMPILER) && defined(__GNUC__)
              #pragma GCC diagnostic push
              #pragma GCC diagnostic ignored "-Wcast-qual"
            #endif
            char *memview_bytes = (char*) s;
            #if defined(__clang__)
              #pragma clang diagnostic pop
            #elif !defined(__INTEL_COMPILER) && defined(__GNUC__)
              #pragma GCC diagnostic pop
            #endif
        #endif
        #if CYTHON_COMPILING_IN_LIMITED_API && !defined(PyBUF_READ)
        int memview_flags = 0x100;  /* literal PyBUF_READ when the header hides it */
        #else
        int memview_flags = PyBUF_READ;
        #endif
        compressed_bytes = PyMemoryView_FromMemory(memview_bytes, length, memview_flags);
    }
    if (unlikely(!compressed_bytes)) {
        Py_DECREF(decompress);
        goto bad;
    }
    decompressed = PyObject_CallFunctionObjArgs(decompress, compressed_bytes, NULL);
    Py_DECREF(compressed_bytes);
    Py_DECREF(decompress);
    Py_DECREF(module);
    Py_DECREF(methodname);
    return decompressed;
import_failed:
    PyErr_Format(PyExc_ImportError,
        "Failed to import '%.20s.decompress' - cannot initialise module strings. "
        "String compression was configured with the C macro 'CYTHON_COMPRESS_STRINGS=%d'.",
        module_name, algo);
bad:
    Py_XDECREF(module);
    Py_DECREF(methodname);
    return NULL;
}

#include <string.h>
/* strlen() with an overflow guard: C lengths are size_t but the CPython API
   works in Py_ssize_t.  Returns -1 with OverflowError set when the string
   is longer than PY_SSIZE_T_MAX. */
static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) {
    const size_t n = strlen(s);
    if (likely(n <= (size_t) PY_SSIZE_T_MAX))
        return (Py_ssize_t) n;
    PyErr_SetString(PyExc_OverflowError, "byte string is too long");
    return -1;
}
/* Build a Python str from a NUL-terminated C string (length-checked). */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
    const Py_ssize_t n = __Pyx_ssize_strlen(c_str);
    return unlikely(n < 0) ? NULL : __Pyx_PyUnicode_FromStringAndSize(c_str, n);
}
/* Build a Python bytearray from a NUL-terminated C string (length-checked). */
static CYTHON_INLINE PyObject* __Pyx_PyByteArray_FromString(const char* c_str) {
    const Py_ssize_t n = __Pyx_ssize_strlen(c_str);
    return unlikely(n < 0) ? NULL : PyByteArray_FromStringAndSize(c_str, n);
}
/* Convenience wrapper: same as __Pyx_PyObject_AsStringAndSize but the
   caller does not need the length. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t unused_length;
    return __Pyx_PyObject_AsStringAndSize(o, &unused_length);
}
/* Borrowed char* view of a unicode object, honouring the module's default
   string encoding.  In ASCII mode, non-ASCII input must fail: the byte
   length of the UTF-8 data is compared with the code-point count (equal
   only for pure-ASCII text), and PyUnicode_AsASCIIString is called solely
   to set the appropriate UnicodeEncodeError. */
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if CYTHON_COMPILING_IN_LIMITED_API
    {
        const char* result;
        Py_ssize_t unicode_length;
        CYTHON_MAYBE_UNUSED_VAR(unicode_length); // only for __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        #if __PYX_LIMITED_VERSION_HEX < 0x030A0000
        if (unlikely(PyArg_Parse(o, "s#", &result, length) < 0)) return NULL;
        #else
        result = PyUnicode_AsUTF8AndSize(o, length);
        #endif
        #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        unicode_length = PyUnicode_GetLength(o);
        if (unlikely(unicode_length < 0)) return NULL;
        if (unlikely(unicode_length != *length)) {
            /* byte count != char count => non-ASCII; raise via the encoder */
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
        #endif
        return result;
    }
#else
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
    if (likely(PyUnicode_IS_ASCII(o))) {
        *length = PyUnicode_GET_LENGTH(o);
        return PyUnicode_AsUTF8(o);
    } else {
        /* non-ASCII in ASCII mode: raise via the encoder */
        PyUnicode_AsASCIIString(o);
        return NULL;
    }
#else
    return PyUnicode_AsUTF8AndSize(o, length);
#endif
#endif
}
#endif
/* Borrowed char* view (plus length) of a str, bytearray or bytes-like
   object.  The pointer remains valid only as long as 'o' is alive and
   unmodified.  Returns NULL with an exception set on failure. */
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_UTF8
    if (PyUnicode_Check(o)) {
        return __Pyx_PyUnicode_AsStringAndSize(o, length);
    } else
#endif
    if (PyByteArray_Check(o)) {
#if (CYTHON_ASSUME_SAFE_SIZE && CYTHON_ASSUME_SAFE_MACROS) || (CYTHON_COMPILING_IN_PYPY && (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)))
        /* macro fast path: no error checking needed */
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
#else
        *length = PyByteArray_Size(o);
        if (*length == -1) return NULL;
        return PyByteArray_AsString(o);
#endif
    } else
    {
        /* bytes (or anything supporting the bytes API) */
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth test with a fast path for the True/False/None singletons; all other
   objects go through PyObject_IsTrue (which may return -1 on error). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
   if (x == Py_True) return 1;
   if ((x == Py_False) | (x == Py_None)) return 0;
   return PyObject_IsTrue(x);
}
/* Truth-test an owned reference and release it; NULL input (a prior error)
   propagates as -1. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
    int truth;
    if (unlikely(!x)) return -1;
    truth = __Pyx_PyObject_IsTrue(x);
    Py_DECREF(x);
    return truth;
}
/* __int__ returned something other than an exact int.  An int *subclass*
   only triggers a DeprecationWarning and is passed through (unless warnings
   are errors); any other type raises TypeError.  Consumes 'result' on the
   error paths. */
static PyObject* __Pyx_PyNumber_LongWrongResultType(PyObject* result) {
    __Pyx_TypeName result_type_name = __Pyx_PyType_GetFullyQualifiedName(Py_TYPE(result));
    if (!PyLong_Check(result)) {
        PyErr_Format(PyExc_TypeError,
                     "__int__ returned non-int (type " __Pyx_FMT_TYPENAME ")",
                     result_type_name);
        __Pyx_DECREF_TypeName(result_type_name);
        Py_DECREF(result);
        return NULL;
    }
    if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
            "__int__ returned non-int (type " __Pyx_FMT_TYPENAME ").  "
            "The ability to return an instance of a strict subclass of int is deprecated, "
            "and may be removed in a future version of Python.",
            result_type_name)) {
        /* warning escalated to an exception */
        __Pyx_DECREF_TypeName(result_type_name);
        Py_DECREF(result);
        return NULL;
    }
    __Pyx_DECREF_TypeName(result_type_name);
    return result;
}
/* int(x) for conversion purposes: returns a new reference to an exact int.
   Uses the nb_int slot directly when type slots are available; otherwise
   falls back to PyNumber_Long (excluding str/bytes, which int() would parse
   but C conversions must reject).  Non-exact-int results are routed through
   __Pyx_PyNumber_LongWrongResultType. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Long(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
  PyNumberMethods *m;
#endif
  PyObject *res = NULL;
  if (likely(PyLong_Check(x)))
      return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
  m = Py_TYPE(x)->tp_as_number;
  if (likely(m && m->nb_int)) {
      res = m->nb_int(x);
  }
#else
  if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
      res = PyNumber_Long(x);
  }
#endif
  if (likely(res)) {
      if (unlikely(!PyLong_CheckExact(res))) {
          return __Pyx_PyNumber_LongWrongResultType(res);
      }
  }
  else if (!PyErr_Occurred()) {
      /* slot missing or returned NULL without setting an error */
      PyErr_SetString(PyExc_TypeError,
                      "an integer is required");
  }
  return res;
}
/* Convert an object to Py_ssize_t via the index protocol.
   Exact ints take a fast path that reads the PyLong digit array directly
   for up to 4 digits (when the internals are accessible); everything else
   goes through PyNumber_Index + PyLong_AsSsize_t.  Returns -1 with an
   exception set on error (callers must check PyErr_Occurred, since -1 is
   also a valid result). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
  Py_ssize_t ival;
  PyObject *x;
  if (likely(PyLong_CheckExact(b))) {
    #if CYTHON_USE_PYLONG_INTERNALS
    if (likely(__Pyx_PyLong_IsCompact(b))) {
        return __Pyx_PyLong_CompactValue(b);
    } else {
      /* Reassemble from 15/30-bit digits; the signed digit count carries
         the sign.  Guards ensure the shifted value fits Py_ssize_t. */
      const digit* digits = __Pyx_PyLong_Digits(b);
      const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b);
      switch (size) {
         case 2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -2:
           if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -3:
           if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case 4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
         case -4:
           if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
             return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
           }
           break;
      }
    }
    #endif
    return PyLong_AsSsize_t(b);
  }
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyLong_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
/* Convert an object to Py_hash_t via the index protocol.  When Py_hash_t
   and Py_ssize_t have the same width (the common case), reuse the fast
   ssize_t conversion; the sizeof comparison is a compile-time constant, so
   the dead branch is eliminated. */
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
  if (sizeof(Py_hash_t) == sizeof(Py_ssize_t))
    return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
  {
    PyObject *num = PyNumber_Index(o);
    Py_ssize_t result;
    if (!num) return -1;
    result = PyLong_AsLong(num);
    Py_DECREF(num);
    return result;
  }
}
/* Return a new (owned) reference to None; the int argument is ignored and
   exists only to match call sites that pass a discarded flag. */
static CYTHON_INLINE PyObject *__Pyx_Owned_Py_None(int b) {
    CYTHON_UNUSED_VAR(b);
    return __Pyx_NewRef(Py_None);
}
/* Map C truthiness onto the Python bool singletons (new reference). */
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
  if (b)
      return __Pyx_NewRef(Py_True);
  return __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyLong_FromSize_t(size_t ival) {
    /* Thin wrapper over the CPython API: build a Python int from size_t. */
    PyObject *as_int = PyLong_FromSize_t(ival);
    return as_int;
}


/* MultiPhaseInitModuleState */
/* Maps interpreter IDs to this extension's module object so module state
   can be located under PEP 489 multi-phase initialization (one module
   instance per interpreter). */
#if CYTHON_PEP489_MULTI_PHASE_INIT && CYTHON_USE_MODULE_STATE
/* Thread-safe lookup is forced on the Limited API and on CPython >= 3.12
   (0x030C0000); otherwise it defaults off and the lock macros are no-ops. */
#ifndef CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
#if (CYTHON_COMPILING_IN_LIMITED_API || PY_VERSION_HEX >= 0x030C0000)
  #define CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE 1
#else
  #define CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE 0
#endif
#endif
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE && !CYTHON_ATOMICS
#error "Module state with PEP489 requires atomics. Currently that's one of\
 C11, C++11, gcc atomic intrinsics or MSVC atomic intrinsics"
#endif
/* Writer-side mutex, chosen by availability: no-op (not thread-safe),
   PyMutex (CPython 3.13+), C++11 std::mutex, C11 <threads.h>, pthreads,
   then Win32 SRW locks. */
#if !CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
#define __Pyx_ModuleStateLookup_Lock()
#define __Pyx_ModuleStateLookup_Unlock()
#elif !CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX >= 0x030d0000
static PyMutex __Pyx_ModuleStateLookup_mutex = {0};
#define __Pyx_ModuleStateLookup_Lock() PyMutex_Lock(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() PyMutex_Unlock(&__Pyx_ModuleStateLookup_mutex)
#elif defined(__cplusplus) && __cplusplus >= 201103L
#include <mutex>
static std::mutex __Pyx_ModuleStateLookup_mutex;
#define __Pyx_ModuleStateLookup_Lock() __Pyx_ModuleStateLookup_mutex.lock()
#define __Pyx_ModuleStateLookup_Unlock() __Pyx_ModuleStateLookup_mutex.unlock()
#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201112L) && !defined(__STDC_NO_THREADS__)
#include <threads.h>
static mtx_t __Pyx_ModuleStateLookup_mutex;
static once_flag __Pyx_ModuleStateLookup_mutex_once_flag = ONCE_FLAG_INIT;
/* C11 mtx_t has no static initializer, so initialize it exactly once. */
static void __Pyx_ModuleStateLookup_initialize_mutex(void) {
    mtx_init(&__Pyx_ModuleStateLookup_mutex, mtx_plain);
}
#define __Pyx_ModuleStateLookup_Lock()\
  call_once(&__Pyx_ModuleStateLookup_mutex_once_flag, __Pyx_ModuleStateLookup_initialize_mutex);\
  mtx_lock(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() mtx_unlock(&__Pyx_ModuleStateLookup_mutex)
#elif defined(HAVE_PTHREAD_H)
#include <pthread.h>
static pthread_mutex_t __Pyx_ModuleStateLookup_mutex = PTHREAD_MUTEX_INITIALIZER;
#define __Pyx_ModuleStateLookup_Lock() pthread_mutex_lock(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() pthread_mutex_unlock(&__Pyx_ModuleStateLookup_mutex)
#elif defined(_WIN32)
#include <Windows.h>  // synchapi.h on its own doesn't work
static SRWLOCK __Pyx_ModuleStateLookup_mutex = SRWLOCK_INIT;
#define __Pyx_ModuleStateLookup_Lock() AcquireSRWLockExclusive(&__Pyx_ModuleStateLookup_mutex)
#define __Pyx_ModuleStateLookup_Unlock() ReleaseSRWLockExclusive(&__Pyx_ModuleStateLookup_mutex)
#else
#error "No suitable lock available for CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE.\
 Requires C standard >= C11, or C++ standard >= C++11,\
 or pthreads, or the Windows 32 API, or Python >= 3.13."
#endif
/* One (interpreter id, module) table entry. */
typedef struct {
    int64_t id;
    PyObject *module;
} __Pyx_InterpreterIdAndModule;
/* Growable lookup table.  When interpreter_id_as_index is non-zero, the
   interpreter id is used directly as an index into 'table'; otherwise
   'table' holds 'count' entries sorted ascending by id.  'table' is
   over-allocated beyond its declared size of 1 ('allocated' entries in
   total — see the realloc size computations in the functions below). */
typedef struct {
    char interpreter_id_as_index;
    Py_ssize_t count;
    Py_ssize_t allocated;
    __Pyx_InterpreterIdAndModule table[1];
} __Pyx_ModuleStateLookupData;
/* Size threshold for direct indexing / switching to linear scan. */
#define __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE 32
/* Number of in-flight lock-free readers; writers spin until it drains. */
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
static __pyx_atomic_int_type __Pyx_ModuleStateLookup_read_counter = 0;
#endif
/* The table pointer itself; accessed atomically when thread-safe. */
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
static __pyx_atomic_ptr_type __Pyx_ModuleStateLookup_data = 0;
#else
static __Pyx_ModuleStateLookupData* __Pyx_ModuleStateLookup_data = NULL;
#endif
static __Pyx_InterpreterIdAndModule* __Pyx_State_FindModuleStateLookupTableLowerBound(
        __Pyx_InterpreterIdAndModule* table,
        Py_ssize_t count,
        int64_t interpreterId) {
    /* Lower-bound search in a table sorted ascending by id: returns the
       first entry whose id is >= interpreterId, or table+count if every
       id is smaller.  Binary search narrows the window; a linear scan
       finishes once the window is small.  Callers must verify that the
       returned entry's id actually equals interpreterId.
       NOTE(review): the fast path below reads table[0] even when
       count == 0; all call sites allocate at least one entry so the read
       is in bounds, but the value may be stale — confirm callers tolerate
       a spurious match in that case. */
    __Pyx_InterpreterIdAndModule* begin = table;
    __Pyx_InterpreterIdAndModule* end = begin + count;
    /* Fast path for a hit at the first entry (presumably the common case,
       e.g. the main interpreter). */
    if (begin->id == interpreterId) {
        return begin;
    }
    while ((end - begin) > __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE) {
        __Pyx_InterpreterIdAndModule* halfway = begin + (end - begin)/2;
        if (halfway->id == interpreterId) {
            return halfway;
        }
        if (halfway->id < interpreterId) {
            begin = halfway;
        } else {
            end = halfway;
        }
    }
    /* Window is small: linear scan for the first id >= interpreterId. */
    for (; begin < end; ++begin) {
        if (begin->id >= interpreterId) return begin;
    }
    return begin;
}
static PyObject *__Pyx_State_FindModule(CYTHON_UNUSED void* dummy) {
    /* Look up this extension's module for the current interpreter.
       Returns a borrowed reference (no incref below), or NULL when the
       interpreter id is unavailable or no entry exists. */
    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
    if (interpreter_id == -1) return NULL;
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    /* Optimistic lock-free read: load the table pointer, register as a
       reader, then re-load and confirm the pointer did not change in
       between.  Writers detach the pointer (exchange to NULL) before
       mutating and wait for the reader counter to drain, so a stable
       non-NULL pointer means the table is safe to read. */
    __Pyx_ModuleStateLookupData* data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_relaxed(&__Pyx_ModuleStateLookup_data);
    {
        __pyx_atomic_incr_acq_rel(&__Pyx_ModuleStateLookup_read_counter);
        if (likely(data)) {
            __Pyx_ModuleStateLookupData* new_data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_acquire(&__Pyx_ModuleStateLookup_data);
            if (likely(data == new_data)) {
                goto read_finished;
            }
        }
        /* Slow path: a writer is (or was) active.  Retire our read claim,
           then re-register and reload the pointer under the writer lock. */
        __pyx_atomic_decr_acq_rel(&__Pyx_ModuleStateLookup_read_counter);
        __Pyx_ModuleStateLookup_Lock();
        __pyx_atomic_incr_relaxed(&__Pyx_ModuleStateLookup_read_counter);
        data = (__Pyx_ModuleStateLookupData*)__pyx_atomic_pointer_load_relaxed(&__Pyx_ModuleStateLookup_data);
        __Pyx_ModuleStateLookup_Unlock();
    }
  read_finished:;
#else
    __Pyx_ModuleStateLookupData* data = __Pyx_ModuleStateLookup_data;
#endif
    __Pyx_InterpreterIdAndModule* found = NULL;
    if (unlikely(!data)) goto end;
    if (data->interpreter_id_as_index) {
        /* Direct-index mode: the interpreter id indexes the table. */
        if (interpreter_id < data->count) {
            found = data->table+interpreter_id;
        }
    } else {
        /* Sorted mode: lower-bound search; the id match is checked below. */
        found = __Pyx_State_FindModuleStateLookupTableLowerBound(
            data->table, data->count, interpreter_id);
    }
  end:
    {
        PyObject *result=NULL;
        if (found && found->id == interpreter_id) {
            result = found->module;
        }
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
        /* Release our read claim so writers may proceed. */
        __pyx_atomic_decr_acq_rel(&__Pyx_ModuleStateLookup_read_counter);
#endif
        return result;
    }
}
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
/* Busy-wait until every in-flight reader has released its claim.  Called
   by writers after exchanging the table pointer to NULL, so no new
   lock-free reader can succeed while we spin. */
static void __Pyx_ModuleStateLookup_wait_until_no_readers(void) {
    while (__pyx_atomic_load(&__Pyx_ModuleStateLookup_read_counter) != 0);
}
#else
/* Without thread-safe lookup there are no concurrent readers to wait for. */
#define __Pyx_ModuleStateLookup_wait_until_no_readers()
#endif
static int __Pyx_State_AddModuleInterpIdAsIndex(__Pyx_ModuleStateLookupData **old_data, PyObject* module, int64_t interpreter_id) {
    /* Insert 'module' at slot 'interpreter_id' while the table is in
       direct-index mode, growing the allocation geometrically as needed.
       Updates *old_data in place.  Returns 0 on success, -1 on OOM (with
       a Python exception set and *old_data left untouched and valid). */
    Py_ssize_t to_allocate = (*old_data)->allocated;
    /* Double until the target index fits. */
    while (to_allocate <= interpreter_id) {
        if (to_allocate == 0) to_allocate = 1;
        else to_allocate *= 2;
    }
    __Pyx_ModuleStateLookupData *new_data = *old_data;
    if (to_allocate != (*old_data)->allocated) {
         new_data = (__Pyx_ModuleStateLookupData *)realloc(
            *old_data,
            sizeof(__Pyx_ModuleStateLookupData)+(to_allocate-1)*sizeof(__Pyx_InterpreterIdAndModule));
        if (!new_data) {
            PyErr_NoMemory();
            return -1;
        }
        /* realloc preserved the old 'allocated' field, so initialize only
           the newly added slots: id == index, no module yet. */
        for (Py_ssize_t i = new_data->allocated; i < to_allocate; ++i) {
            new_data->table[i].id = i;
            new_data->table[i].module = NULL;
        }
        new_data->allocated = to_allocate;
    }
    new_data->table[interpreter_id].module = module;
    /* In this mode 'count' is one past the highest index ever used. */
    if (new_data->count < interpreter_id+1) {
        new_data->count = interpreter_id+1;
    }
    *old_data = new_data;
    return 0;
}
static void __Pyx_State_ConvertFromInterpIdAsIndex(__Pyx_ModuleStateLookupData *data) {
    /* Convert from direct-index mode to sorted mode by compacting out
       empty (module == NULL) slots in place.  Ids stay ascending because
       in direct-index mode table[i].id == i.  'count' becomes the number
       of live entries and the vacated tail is zeroed. */
    __Pyx_InterpreterIdAndModule *read = data->table;
    __Pyx_InterpreterIdAndModule *write = data->table;
    __Pyx_InterpreterIdAndModule *end = read + data->count;
    for (; read<end; ++read) {
        if (read->module) {
            write->id = read->id;
            write->module = read->module;
            ++write;
        }
    }
    data->count = write - data->table;
    /* Clear the now-unused tail so stale entries cannot be matched. */
    for (; write<end; ++write) {
        write->id = 0;
        write->module = NULL;
    }
    data->interpreter_id_as_index = 0;
}
static int __Pyx_State_AddModule(PyObject* module, CYTHON_UNUSED void* dummy) {
    /* Register 'module' for the current interpreter.  Returns 0 on
       success, -1 on error (with a Python exception set).  Writer
       protocol: take the lock, detach the table pointer so lock-free
       readers fall back to the lock path, wait for in-flight readers to
       drain, mutate, then republish the table at 'end'. */
    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
    if (interpreter_id == -1) return -1;
    int result = 0;
    __Pyx_ModuleStateLookup_Lock();
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    __Pyx_ModuleStateLookupData *old_data = (__Pyx_ModuleStateLookupData *)
            __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, 0);
#else
    __Pyx_ModuleStateLookupData *old_data = __Pyx_ModuleStateLookup_data;
#endif
    __Pyx_ModuleStateLookupData *new_data = old_data;
    if (!new_data) {
        /* First registration: start in direct-index mode with one slot. */
        new_data = (__Pyx_ModuleStateLookupData *)calloc(1, sizeof(__Pyx_ModuleStateLookupData));
        if (!new_data) {
            result = -1;
            PyErr_NoMemory();
            goto end;
        }
        new_data->allocated = 1;
        new_data->interpreter_id_as_index = 1;
    }
    __Pyx_ModuleStateLookup_wait_until_no_readers();
    if (new_data->interpreter_id_as_index) {
        if (interpreter_id < __PYX_MODULE_STATE_LOOKUP_SMALL_SIZE) {
            result = __Pyx_State_AddModuleInterpIdAsIndex(&new_data, module, interpreter_id);
            goto end;
        }
        /* Interpreter id too large for direct indexing: switch to sorted
           mode and insert below. */
        __Pyx_State_ConvertFromInterpIdAsIndex(new_data);
    }
    {
        Py_ssize_t insert_at = 0;
        {
            __Pyx_InterpreterIdAndModule* lower_bound = __Pyx_State_FindModuleStateLookupTableLowerBound(
                new_data->table, new_data->count, interpreter_id);
            assert(lower_bound);
            insert_at = lower_bound - new_data->table;
            if (unlikely(insert_at < new_data->count && lower_bound->id == interpreter_id)) {
                lower_bound->module = module;
                goto end;  // already in table, nothing more to do
            }
        }
        if (new_data->count+1 >= new_data->allocated) {
            /* Grow geometrically.  realloc through a temporary so the
               current block is not lost if allocation fails (the original
               assigned realloc's result straight back and, when old_data
               was NULL, leaked the freshly calloc'd table on failure). */
            Py_ssize_t to_allocate = (new_data->count+1)*2;
            __Pyx_ModuleStateLookupData *resized =
                (__Pyx_ModuleStateLookupData*)realloc(
                    new_data,
                    sizeof(__Pyx_ModuleStateLookupData) +
                    (to_allocate-1)*sizeof(__Pyx_InterpreterIdAndModule));
            if (!resized) {
                result = -1;
                /* new_data != old_data only when it was calloc'd above;
                   free it to avoid the leak, then restore the old table. */
                if (new_data != old_data) free(new_data);
                new_data = old_data;
                PyErr_NoMemory();
                goto end;
            }
            new_data = resized;
            new_data->allocated = to_allocate;
        }
        ++new_data->count;
        /* Shift entries right from insert_at while dropping in the new
           (id, module) pair, preserving ascending id order. */
        int64_t last_id = interpreter_id;
        PyObject *last_module = module;
        for (Py_ssize_t i=insert_at; i<new_data->count; ++i) {
            int64_t current_id = new_data->table[i].id;
            new_data->table[i].id = last_id;
            last_id = current_id;
            PyObject *current_module = new_data->table[i].module;
            new_data->table[i].module = last_module;
            last_module = current_module;
        }
    }
  end:
    /* Republish the (possibly updated, possibly NULL) table pointer. */
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, new_data);
#else
    __Pyx_ModuleStateLookup_data = new_data;
#endif
    __Pyx_ModuleStateLookup_Unlock();
    return result;
}
static int __Pyx_State_RemoveModule(CYTHON_UNUSED void* dummy) {
    /* Remove the current interpreter's entry from the lookup table.
       Returns 0 on success (including "no entry found"), -1 if the
       interpreter id cannot be obtained.  Frees the table once empty. */
    int64_t interpreter_id = PyInterpreterState_GetID(__Pyx_PyInterpreterState_Get());
    if (interpreter_id == -1) return -1;
    __Pyx_ModuleStateLookup_Lock();
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    /* Detach the table so lock-free readers fall back to the lock path. */
    __Pyx_ModuleStateLookupData *data = (__Pyx_ModuleStateLookupData *)
            __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, 0);
#else
    __Pyx_ModuleStateLookupData *data = __Pyx_ModuleStateLookup_data;
#endif
    /* Fix: guard against a NULL table (nothing ever registered, or a
       repeated removal after the table was freed below) — the original
       dereferenced 'data' unconditionally here. */
    if (unlikely(!data)) goto done;
    if (data->interpreter_id_as_index) {
        /* Direct-index mode: just clear the slot; no compaction needed. */
        if (interpreter_id < data->count) {
            data->table[interpreter_id].module = NULL;
        }
        goto done;
    }
    {
        __Pyx_ModuleStateLookup_wait_until_no_readers();
        __Pyx_InterpreterIdAndModule* lower_bound = __Pyx_State_FindModuleStateLookupTableLowerBound(
            data->table, data->count, interpreter_id);
        if (!lower_bound) goto done;
        if (lower_bound->id != interpreter_id) goto done;
        /* Shift the tail left over the removed entry to keep ids sorted. */
        __Pyx_InterpreterIdAndModule *end = data->table+data->count;
        for (;lower_bound<end-1; ++lower_bound) {
            lower_bound->id = (lower_bound+1)->id;
            lower_bound->module = (lower_bound+1)->module;
        }
    }
    --data->count;
    if (data->count == 0) {
        /* Last entry gone: release the table entirely. */
        free(data);
        data = NULL;
    }
  done:
    /* Republish the (possibly NULL) table pointer. */
#if CYTHON_MODULE_STATE_LOOKUP_THREAD_SAFE
    __pyx_atomic_pointer_exchange(&__Pyx_ModuleStateLookup_data, data);
#else
    __Pyx_ModuleStateLookup_data = data;
#endif
    __Pyx_ModuleStateLookup_Unlock();
    return 0;
}
#endif

/* #### Code section: utility_code_pragmas_end ### */
#ifdef _MSC_VER
#pragma warning( pop )
#endif



/* #### Code section: end ### */
#endif /* Py_PYTHON_H */
